Diffstat (limited to 'kernel/time')

-rw-r--r--  kernel/time/clockevents.c    | 12
-rw-r--r--  kernel/time/tick-broadcast.c | 23
-rw-r--r--  kernel/time/tick-common.c    | 14
-rw-r--r--  kernel/time/tick-internal.h  |  9
-rw-r--r--  kernel/time/tick-sched.c     | 13

5 files changed, 52 insertions(+), 19 deletions(-)
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1876b526c778..f8d968063cea 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,6 +72,16 @@ void clockevents_set_mode(struct clock_event_device *dev,
 }
 
 /**
+ * clockevents_shutdown - shutdown the device and clear next_event
+ * @dev:	device to shutdown
+ */
+void clockevents_shutdown(struct clock_event_device *dev)
+{
+	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+	dev->next_event.tv64 = KTIME_MAX;
+}
+
+/**
  * clockevents_program_event - Reprogram the clock event device.
  * @expires:	absolute expiry time (monotonic clock)
  *
@@ -206,7 +216,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
 
 	if (new) {
 		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
-		clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
+		clockevents_shutdown(new);
 	}
 	local_irq_restore(flags);
 }
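
Note: the new helper consolidates a pattern that several callers below open-coded. A minimal before/after sketch of a call site (the variable name dev is illustrative, not taken from any one hunk):

	/* before: stop the device, but next_event keeps its stale expiry */
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);

	/* after: one call stops the device and clears the pending expiry */
	clockevents_shutdown(dev);	/* also sets dev->next_event.tv64 = KTIME_MAX */
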
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2f5a38294bf9..cb01cd8f919b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -235,9 +235,9 @@ static void tick_do_broadcast_on_off(void *why)
 	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
 		if (!cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_set(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
-				clockevents_set_mode(dev,
-						     CLOCK_EVT_MODE_SHUTDOWN);
+			if (tick_broadcast_device.mode ==
+			    TICKDEV_MODE_PERIODIC)
+				clockevents_shutdown(dev);
 		}
 		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
 			tick_broadcast_force = 1;
@@ -246,7 +246,8 @@ static void tick_do_broadcast_on_off(void *why)
 		if (!tick_broadcast_force &&
 		    cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_clear(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
+			if (tick_broadcast_device.mode ==
+			    TICKDEV_MODE_PERIODIC)
 				tick_setup_periodic(dev, 0);
 		}
 		break;
@@ -254,7 +255,7 @@ static void tick_do_broadcast_on_off(void *why)
 
 	if (cpus_empty(tick_broadcast_mask)) {
 		if (!bc_stopped)
-			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+			clockevents_shutdown(bc);
 	} else if (bc_stopped) {
 		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
 			tick_broadcast_start_periodic(bc);
@@ -306,7 +307,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
 		if (bc && cpus_empty(tick_broadcast_mask))
-			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+			clockevents_shutdown(bc);
 	}
 
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -321,7 +322,7 @@ void tick_suspend_broadcast(void)
 
 	bc = tick_broadcast_device.evtdev;
 	if (bc)
-		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+		clockevents_shutdown(bc);
 
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
@@ -576,4 +577,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+/*
+ * Check, whether the broadcast device is in one shot mode
+ */
+int tick_broadcast_oneshot_active(void)
+{
+	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
+}
+
 #endif
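
Note: tick_broadcast_oneshot_active() is a small accessor so that code outside this file can test the broadcast mode without touching tick_broadcast_device directly. Its only caller in this diff is the check added to tick_setup_periodic() in the tick-common.c hunk below, excerpted here for reference:

	/* do not switch to periodic mode while the broadcast device
	 * is running in oneshot mode */
	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	}
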
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index c4777193d567..df12434b43ca 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-int tick_do_timer_cpu __read_mostly = -1;
+int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 	if (!tick_device_is_functional(dev))
 		return;
 
-	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
+	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+	    !tick_broadcast_oneshot_active()) {
 		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
 	} else {
 		unsigned long seq;
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td,
 	 * If no cpu took the do_timer update, assign it to
 	 * this cpu:
 	 */
-	if (tick_do_timer_cpu == -1) {
+	if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
 		tick_do_timer_cpu = cpu;
 		tick_next_period = ktime_get();
 		tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
@@ -249,7 +250,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	 * not give it back to the clockevents layer !
 	 */
 	if (tick_is_broadcast_device(curdev)) {
-		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
+		clockevents_shutdown(curdev);
 		curdev = NULL;
 	}
 	clockevents_exchange_device(curdev, newdev);
@@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup)
 	if (*cpup == tick_do_timer_cpu) {
 		int cpu = first_cpu(cpu_online_map);
 
-		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
+			TICK_DO_TIMER_NONE;
 	}
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
@@ -311,7 +313,7 @@ static void tick_suspend(void)
 	unsigned long flags;
 
 	spin_lock_irqsave(&tick_device_lock, flags);
-	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+	clockevents_shutdown(td->evtdev);
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
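
Note: the raw -1 sentinel for tick_do_timer_cpu is split into two named states. A sketch of the intended life cycle (the explanatory comments are editorial; the constants come from tick-internal.h below):

	int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
	/* TICK_DO_TIMER_BOOT (-2): nobody owns the do_timer/jiffies duty yet;
	 *   the first CPU that sets up its tick device claims it.
	 * TICK_DO_TIMER_NONE (-1): the owner dropped the duty, e.g. when going
	 *   idle in NOHZ mode or going offline; the next CPU that runs a tick
	 *   handler picks it up again (see the tick-sched.c hunks below).
	 */
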
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 0ffc2918ea6f..469248782c23 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -1,6 +1,10 @@
 /*
  * tick internal variable and functions used by low/high res code
  */
+
+#define TICK_DO_TIMER_NONE	-1
+#define TICK_DO_TIMER_BOOT	-2
+
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
@@ -10,6 +14,8 @@ extern int tick_do_timer_cpu __read_mostly;
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
 
+extern void clockevents_shutdown(struct clock_event_device *dev);
+
 /*
  * NO_HZ / high resolution timer shared code
  */
@@ -29,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+extern int tick_broadcast_oneshot_active(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -37,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -66,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
 	return 0;
 }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
#endif /* !TICK_ONESHOT */
 
 /*
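
Note: the two inline stubs follow the usual config-stub pattern in this header, so callers can use the new check unconditionally. When oneshot or broadcast support is configured out, the stub

	static inline int tick_broadcast_oneshot_active(void) { return 0; }

makes !tick_broadcast_oneshot_active() a compile-time constant, and the condition added in tick_setup_periodic() reduces to the pre-patch CLOCK_EVT_FEAT_PERIODIC check.
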
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a87b0468568b..cb02324bdb88 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 							   incr * ticks);
 		}
 		do_timer(++ticks);
+
+		/* Keep the tick_next_period variable up to date */
+		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
 	write_sequnlock(&xtime_lock);
 }
@@ -221,7 +224,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	if (unlikely(!cpu_online(cpu))) {
 		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -303,7 +306,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 		 * invoked.
 		 */
 		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 
 		ts->idle_sleeps++;
 
@@ -468,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 
 	/* Check, if the jiffies need an update */
@@ -570,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 #endif
 
@@ -622,7 +625,7 @@ void tick_setup_sched_timer(void)
 	 */
 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ts->sched_timer.function = tick_sched_timer;
-	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
 	/* Get the next period (per cpu) */
 	ts->sched_timer.expires = tick_init_jiffy_update();
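
Note: two related changes in this file. First, the do_timer duty hand-off now uses the named sentinel: a CPU drops the duty with TICK_DO_TIMER_NONE when it goes idle or offline, and whichever CPU next runs a tick handler reclaims it, as in the hunks above:

	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

Second, tick_do_update_jiffies64() now keeps tick_next_period current while jiffies advance, presumably so that tick_setup_periodic(), which programs a newly set up device from tick_next_period (e.g. on CPU online after a long NOHZ idle period), does not start from a stale value.
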
