Diffstat (limited to 'kernel/time')
-rw-r--r--   kernel/time/clockevents.c    |  69
-rw-r--r--   kernel/time/clocksource.c    |  11
-rw-r--r--   kernel/time/jiffies.c        |   2
-rw-r--r--   kernel/time/ntp.c            |  30
-rw-r--r--   kernel/time/tick-broadcast.c |  53
-rw-r--r--   kernel/time/tick-common.c    |  38
-rw-r--r--   kernel/time/tick-internal.h  |  15
-rw-r--r--   kernel/time/tick-oneshot.c   |  12
-rw-r--r--   kernel/time/tick-sched.c     |  14
-rw-r--r--   kernel/time/timer_list.c     |   6
10 files changed, 151 insertions, 99 deletions
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 67932ea78c17..76212b2a99de 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -274,72 +274,3 @@ void clockevents_notify(unsigned long reason, void *arg)
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 
-#ifdef CONFIG_SYSFS
-
-/**
- * clockevents_show_registered - sysfs interface for listing clockevents
- * @dev: unused
- * @buf: char buffer to be filled with clock events list
- *
- * Provides sysfs interface for listing registered clock event devices
- */
-static ssize_t clockevents_show_registered(struct sys_device *dev, char *buf)
-{
-        struct list_head *tmp;
-        char *p = buf;
-        int cpu;
-
-        spin_lock(&clockevents_lock);
-
-        list_for_each(tmp, &clockevent_devices) {
-                struct clock_event_device *ce;
-
-                ce = list_entry(tmp, struct clock_event_device, list);
-                p += sprintf(p, "%-20s F:%04x M:%d", ce->name,
-                             ce->features, ce->mode);
-                p += sprintf(p, " C:");
-                if (!cpus_equal(ce->cpumask, cpu_possible_map)) {
-                        for_each_cpu_mask(cpu, ce->cpumask)
-                                p += sprintf(p, " %d", cpu);
-                } else {
-                        /*
-                         * FIXME: Add the cpu which is handling this sucker
-                         */
-                }
-                p += sprintf(p, "\n");
-        }
-
-        spin_unlock(&clockevents_lock);
-
-        return p - buf;
-}
-
-/*
- * Sysfs setup bits:
- */
-static SYSDEV_ATTR(registered, 0600,
-                   clockevents_show_registered, NULL);
-
-static struct sysdev_class clockevents_sysclass = {
-        set_kset_name("clockevents"),
-};
-
-static struct sys_device clockevents_sys_device = {
-        .id  = 0,
-        .cls = &clockevents_sysclass,
-};
-
-static int __init clockevents_sysfs_init(void)
-{
-        int error = sysdev_class_register(&clockevents_sysclass);
-
-        if (!error)
-                error = sysdev_register(&clockevents_sys_device);
-        if (!error)
-                error = sysdev_create_file(
-                                &clockevents_sys_device,
-                                &attr_registered);
-        return error;
-}
-device_initcall(clockevents_sysfs_init);
-#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 193a0793af95..fe5c7db24247 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -55,16 +55,18 @@ static DEFINE_SPINLOCK(clocksource_lock);
 static char override_name[32];
 static int finished_booting;
 
-/* clocksource_done_booting - Called near the end of bootup
+/* clocksource_done_booting - Called near the end of core bootup
  *
- * Hack to avoid lots of clocksource churn at boot time
+ * Hack to avoid lots of clocksource churn at boot time.
+ * We use fs_initcall because we want this to start before
+ * device_initcall but after subsys_initcall.
  */
 static int __init clocksource_done_booting(void)
 {
         finished_booting = 1;
         return 0;
 }
-late_initcall(clocksource_done_booting);
+fs_initcall(clocksource_done_booting);
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static LIST_HEAD(watchdog_list);
@@ -149,7 +151,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
                         watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
                         add_timer(&watchdog_timer);
                 }
-        } else if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) {
+        } else {
+                if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                         cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 
                 if (!watchdog || cs->rating > watchdog->rating) {
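The hunk above relies on initcall ordering: fs_initcall() runs after subsys_initcall() but before device_initcall() (which is what module_init() maps to for built-in code), and core_initcall() - used for the jiffies clocksource below - runs earlier still. The toy snippet that follows only illustrates that ordering; the function names are made up and it is not part of this patch.

#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical built-in code: fs_example() runs before dev_example(). */
static int __init fs_example(void)
{
        printk(KERN_INFO "fs_initcall level: runs before device_initcall\n");
        return 0;
}
fs_initcall(fs_example);

static int __init dev_example(void)
{
        printk(KERN_INFO "device_initcall level: most drivers register here\n");
        return 0;
}
device_initcall(dev_example);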
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 3be8da8fed7e..4c256fdb8875 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -69,4 +69,4 @@ static int __init init_jiffies_clocksource(void)
         return clocksource_register(&clocksource_jiffies);
 }
 
-module_init(init_jiffies_clocksource);
+core_initcall(init_jiffies_clocksource);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index eb12509e00bd..cb25649c6f50 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -32,7 +32,7 @@ static u64 tick_length, tick_length_base;
 /* TIME_ERROR prevents overwriting the CMOS clock */
 static int time_state = TIME_OK;        /* clock synchronization status */
 int time_status = STA_UNSYNC;           /* clock status bits            */
-static long time_offset;                /* time adjustment (ns)         */
+static s64 time_offset;                 /* time adjustment (ns)         */
 static long time_constant = 2;          /* pll time constant            */
 long time_maxerror = NTP_PHASE_LIMIT;   /* maximum error (us)           */
 long time_esterror = NTP_PHASE_LIMIT;   /* estimated error (us)         */
@@ -196,7 +196,7 @@ void __attribute__ ((weak)) notify_arch_cmos_timer(void)
  */
 int do_adjtimex(struct timex *txc)
 {
-        long ltemp, mtemp, save_adjust;
+        long mtemp, save_adjust, rem;
         s64 freq_adj, temp64;
         int result;
 
@@ -277,14 +277,14 @@ int do_adjtimex(struct timex *txc)
                         time_adjust = txc->offset;
                 }
                 else if (time_status & STA_PLL) {
-                        ltemp = txc->offset * NSEC_PER_USEC;
+                        time_offset = txc->offset * NSEC_PER_USEC;
 
                         /*
                          * Scale the phase adjustment and
                          * clamp to the operating range.
                          */
-                        time_offset = min(ltemp, MAXPHASE * NSEC_PER_USEC);
-                        time_offset = max(time_offset, -MAXPHASE * NSEC_PER_USEC);
+                        time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
+                        time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);
 
                         /*
                          * Select whether the frequency is to be controlled
@@ -297,11 +297,11 @@ int do_adjtimex(struct timex *txc)
                         mtemp = xtime.tv_sec - time_reftime;
                         time_reftime = xtime.tv_sec;
 
-                        freq_adj = (s64)time_offset * mtemp;
+                        freq_adj = time_offset * mtemp;
                         freq_adj = shift_right(freq_adj, time_constant * 2 +
                                                (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
                         if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-                                temp64 = (s64)time_offset << (SHIFT_NSEC - SHIFT_FLL);
+                                temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
                                 if (time_offset < 0) {
                                         temp64 = -temp64;
                                         do_div(temp64, mtemp);
@@ -314,8 +314,10 @@ int do_adjtimex(struct timex *txc)
                         freq_adj += time_freq;
                         freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
                         time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
-                        time_offset = (time_offset / NTP_INTERVAL_FREQ)
-                                        << SHIFT_UPDATE;
+                        time_offset = div_long_long_rem_signed(time_offset,
+                                                               NTP_INTERVAL_FREQ,
+                                                               &rem);
+                        time_offset <<= SHIFT_UPDATE;
                 } /* STA_PLL */
         } /* txc->modes & ADJ_OFFSET */
         if (txc->modes & ADJ_TICK)
@@ -328,12 +330,12 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
                 result = TIME_ERROR;
 
         if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
                 txc->offset = save_adjust;
         else
-                txc->offset = shift_right(time_offset, SHIFT_UPDATE)
-                                * NTP_INTERVAL_FREQ / 1000;
-        txc->freq = (time_freq / NSEC_PER_USEC)
-                        << (SHIFT_USEC - SHIFT_NSEC);
+                txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
+                                NTP_INTERVAL_FREQ / 1000;
+        txc->freq = (time_freq / NSEC_PER_USEC) <<
+                        (SHIFT_USEC - SHIFT_NSEC);
         txc->maxerror = time_maxerror;
         txc->esterror = time_esterror;
         txc->status = time_status;
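A rough bound shows why time_offset is widened to s64 above (assuming the usual MAXPHASE of 500000 us from timex.h of this era): after the clamp it can reach 500000 * 1000 = 5 * 10^8 ns, which is already close to 2^29, leaving only a couple of bits of headroom below LONG_MAX = 2^31 - 1 on 32-bit machines. The later left shifts (<< SHIFT_UPDATE, and << (SHIFT_NSEC - SHIFT_FLL) in the FLL path) would therefore overflow a 32-bit long, while they are safe in 64 bits; the matching 64-bit division by NTP_INTERVAL_FREQ goes through div_long_long_rem_signed(), since the kernel avoids open-coded 64-bit division on 32-bit architectures.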
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 12b3efeb9f6f..eadfce2fff74 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -284,6 +284,49 @@ void tick_shutdown_broadcast(unsigned int *cpup)
         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+void tick_suspend_broadcast(void)
+{
+        struct clock_event_device *bc;
+        unsigned long flags;
+
+        spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+        bc = tick_broadcast_device.evtdev;
+        if (bc && tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+                clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+
+        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+}
+
+int tick_resume_broadcast(void)
+{
+        struct clock_event_device *bc;
+        unsigned long flags;
+        int broadcast = 0;
+
+        spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+        bc = tick_broadcast_device.evtdev;
+
+        if (bc) {
+                switch (tick_broadcast_device.mode) {
+                case TICKDEV_MODE_PERIODIC:
+                        if(!cpus_empty(tick_broadcast_mask))
+                                tick_broadcast_start_periodic(bc);
+                        broadcast = cpu_isset(smp_processor_id(),
+                                              tick_broadcast_mask);
+                        break;
+                case TICKDEV_MODE_ONESHOT:
+                        broadcast = tick_resume_broadcast_oneshot(bc);
+                        break;
+                }
+        }
+        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+
+        return broadcast;
+}
+
+
 #ifdef CONFIG_TICK_ONESHOT
 
 static cpumask_t tick_broadcast_oneshot_mask;
@@ -311,6 +354,16 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
         }
 }
 
+int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
+{
+        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
+        if(!cpus_empty(tick_broadcast_oneshot_mask))
+                tick_broadcast_set_event(ktime_get(), 1);
+
+        return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
+}
+
 /*
  * Reprogram the broadcast device:
  *
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 4500e347f1bb..bfda3f7f0716 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -77,6 +77,7 @@ static void tick_periodic(int cpu)
 void tick_handle_periodic(struct clock_event_device *dev)
 {
         int cpu = smp_processor_id();
+        ktime_t next;
 
         tick_periodic(cpu);
 
@@ -86,12 +87,12 @@ void tick_handle_periodic(struct clock_event_device *dev)
          * Setup the next period for devices, which do not have
          * periodic mode:
          */
+        next = ktime_add(dev->next_event, tick_period);
         for (;;) {
-                ktime_t next = ktime_add(dev->next_event, tick_period);
-
                 if (!clockevents_program_event(dev, next, ktime_get()))
                         return;
                 tick_periodic(cpu);
+                next = ktime_add(next, tick_period);
         }
 }
 
@@ -297,6 +298,29 @@ static void tick_shutdown(unsigned int *cpup)
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
+static void tick_suspend(void)
+{
+        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+        unsigned long flags;
+
+        spin_lock_irqsave(&tick_device_lock, flags);
+        clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+        spin_unlock_irqrestore(&tick_device_lock, flags);
+}
+
+static void tick_resume(void)
+{
+        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+        unsigned long flags;
+
+        spin_lock_irqsave(&tick_device_lock, flags);
+        if (td->mode == TICKDEV_MODE_PERIODIC)
+                tick_setup_periodic(td->evtdev, 0);
+        else
+                tick_resume_oneshot();
+        spin_unlock_irqrestore(&tick_device_lock, flags);
+}
+
 /*
  * Notification about clock event devices
  */
@@ -324,6 +348,16 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
                 tick_shutdown(dev);
                 break;
 
+        case CLOCK_EVT_NOTIFY_SUSPEND:
+                tick_suspend();
+                tick_suspend_broadcast();
+                break;
+
+        case CLOCK_EVT_NOTIFY_RESUME:
+                if (!tick_resume_broadcast())
+                        tick_resume();
+                break;
+
         default:
                 break;
         }
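The new CLOCK_EVT_NOTIFY_SUSPEND/RESUME cases are reached through clockevents_notify(), whose export is visible at the top of this patch. As a rough sketch of how a caller in the suspend/resume path might drive them (the real call sites are outside kernel/time and not part of this diff; the function names below are made up):

#include <linux/clockchips.h>

/* Hypothetical caller: shuts down the per-cpu tick and broadcast devices. */
static void example_timer_suspend(void)
{
        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
}

static void example_timer_resume(void)
{
        /*
         * tick_notify() tries tick_resume_broadcast() first; the local tick
         * device is only resumed via tick_resume() when the broadcast device
         * does not cover this CPU.
         */
        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
}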
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 54861a0f29ff..c9d203bde518 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -19,12 +19,13 @@ extern void tick_setup_oneshot(struct clock_event_device *newdev,
 extern int tick_program_event(ktime_t expires, int force);
 extern void tick_oneshot_notify(void);
 extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
-
+extern void tick_resume_oneshot(void);
 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
+extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -43,6 +44,10 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
 {
         BUG();
 }
+static inline void tick_resume_oneshot(void)
+{
+        BUG();
+}
 static inline int tick_program_event(ktime_t expires, int force)
 {
         return 0;
@@ -54,6 +59,10 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 }
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
+{
+        return 0;
+}
 #endif /* !TICK_ONESHOT */
 
 /*
@@ -67,6 +76,8 @@ extern int tick_check_broadcast_device(struct clock_event_device *dev);
 extern int tick_is_broadcast_device(struct clock_event_device *dev);
 extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
 extern void tick_shutdown_broadcast(unsigned int *cpup);
+extern void tick_suspend_broadcast(void);
+extern int tick_resume_broadcast(void);
 
 extern void
 tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
@@ -90,6 +101,8 @@ static inline int tick_device_uses_broadcast(struct clock_event_device *dev,
 static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
 static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { }
 static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
+static inline void tick_suspend_broadcast(void) { }
+static inline int tick_resume_broadcast(void) { return 0; }
 
 /*
  * Set the periodic handler in non broadcast mode
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 2e8b7ff863cc..f6997ab0c3c9 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -41,6 +41,18 @@ int tick_program_event(ktime_t expires, int force)
 }
 
 /**
+ * tick_resume_onshot - resume oneshot mode
+ */
+void tick_resume_oneshot(void)
+{
+        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+        struct clock_event_device *dev = td->evtdev;
+
+        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+        tick_program_event(ktime_get(), 1);
+}
+
+/**
  * tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz)
  */
 void tick_setup_oneshot(struct clock_event_device *newdev,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 95e41f7f850b..51556b95f60f 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -21,6 +21,8 @@
 #include <linux/sched.h>
 #include <linux/tick.h>
 
+#include <asm/irq_regs.h>
+
 #include "tick-internal.h"
 
 /*
@@ -165,7 +167,9 @@ void tick_nohz_stop_sched_tick(void)
                 goto end;
 
         cpu = smp_processor_id();
-        BUG_ON(local_softirq_pending());
+        if (unlikely(local_softirq_pending()))
+                printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+                       local_softirq_pending());
 
         now = ktime_get();
         /*
@@ -191,19 +195,19 @@ void tick_nohz_stop_sched_tick(void)
         next_jiffies = get_next_timer_interrupt(last_jiffies);
         delta_jiffies = next_jiffies - last_jiffies;
 
+        if (rcu_needs_cpu(cpu))
+                delta_jiffies = 1;
         /*
          * Do not stop the tick, if we are only one off
          * or if the cpu is required for rcu
          */
-        if (!ts->tick_stopped && (delta_jiffies == 1 || rcu_needs_cpu(cpu)))
+        if (!ts->tick_stopped && delta_jiffies == 1)
                 goto out;
 
         /* Schedule the tick, if we are at least one jiffie off */
         if ((long)delta_jiffies >= 1) {
 
-                if (rcu_needs_cpu(cpu))
-                        delta_jiffies = 1;
-                else
+                if (delta_jiffies > 1)
                         cpu_set(cpu, nohz_cpu_mask);
                 /*
                  * nohz_stop_sched_tick can be called several times before
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index f82c635c3d5c..59df5e8555a8 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -194,9 +194,9 @@ print_tickdevice(struct seq_file *m, struct tick_device *td)
                 return;
         }
         SEQ_printf(m, "%s\n", dev->name);
-        SEQ_printf(m, " max_delta_ns:   %ld\n", dev->max_delta_ns);
-        SEQ_printf(m, " min_delta_ns:   %ld\n", dev->min_delta_ns);
-        SEQ_printf(m, " mult:           %ld\n", dev->mult);
+        SEQ_printf(m, " max_delta_ns:   %lu\n", dev->max_delta_ns);
+        SEQ_printf(m, " min_delta_ns:   %lu\n", dev->min_delta_ns);
+        SEQ_printf(m, " mult:           %lu\n", dev->mult);
         SEQ_printf(m, " shift:          %d\n", dev->shift);
         SEQ_printf(m, " mode:           %d\n", dev->mode);
         SEQ_printf(m, " next_event:     %Ld nsecs\n",