Diffstat (limited to 'kernel/time')
 -rw-r--r--  kernel/time/clockevents.c    | 15
 -rw-r--r--  kernel/time/ntp.c            |  2
 -rw-r--r--  kernel/time/tick-broadcast.c | 99
 -rw-r--r--  kernel/time/tick-common.c    | 15
 -rw-r--r--  kernel/time/tick-internal.h  | 11
 -rw-r--r--  kernel/time/tick-oneshot.c   | 44
 -rw-r--r--  kernel/time/tick-sched.c     | 22
7 files changed, 160 insertions, 48 deletions
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 3d1e3e1a1971..f8d968063cea 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,6 +72,16 @@ void clockevents_set_mode(struct clock_event_device *dev,
 }
 
 /**
+ * clockevents_shutdown - shutdown the device and clear next_event
+ * @dev: device to shutdown
+ */
+void clockevents_shutdown(struct clock_event_device *dev)
+{
+	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+	dev->next_event.tv64 = KTIME_MAX;
+}
+
+/**
  * clockevents_program_event - Reprogram the clock event device.
  * @expires: absolute expiry time (monotonic clock)
  *
@@ -177,7 +187,7 @@ void clockevents_register_device(struct clock_event_device *dev)
 /*
  * Noop handler when we shut down an event device
  */
-static void clockevents_handle_noop(struct clock_event_device *dev)
+void clockevents_handle_noop(struct clock_event_device *dev)
 {
 }
 
@@ -199,7 +209,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
	 * released list and do a notify add later.
	 */
	if (old) {
-		old->event_handler = clockevents_handle_noop;
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
@@ -207,7 +216,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
 
	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
-		clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
+		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
 }
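
Aside: the clockevents_shutdown() helper introduced above exists so the mode switch and the next_event reset always happen together. A standalone C sketch of that pattern, with simplified stand-in types that are not the kernel's real structures:

	#include <stdint.h>
	#include <stdio.h>

	#define KTIME_MAX INT64_MAX
	enum mode { MODE_ONESHOT, MODE_SHUTDOWN };

	struct event_dev {
		enum mode mode;
		int64_t next_event;	/* ns; stands in for ktime_t .tv64 */
	};

	static void dev_set_mode(struct event_dev *dev, enum mode m)
	{
		dev->mode = m;
	}

	/* One call instead of two, so no caller can forget the reset. */
	static void dev_shutdown(struct event_dev *dev)
	{
		dev_set_mode(dev, MODE_SHUTDOWN);
		dev->next_event = KTIME_MAX;
	}

	int main(void)
	{
		struct event_dev d = { MODE_ONESHOT, 123456789 };

		dev_shutdown(&d);
		printf("mode=%d next_event=%lld\n", d.mode,
		       (long long)d.next_event);
		return 0;
	}

The patch then converts every open-coded set_mode(SHUTDOWN) call site to the helper, which is why the same one-line change recurs across the files below.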
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5125ddd8196b..1ad46f3df6e7 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -245,7 +245,7 @@ static void sync_cmos_clock(unsigned long dummy)
	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
		fail = update_persistent_clock(now);
 
-	next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;
+	next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
	if (next.tv_nsec <= 0)
		next.tv_nsec += NSEC_PER_SEC;
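
A toy userspace calculation of what the changed expression appears to target: a timer can only expire on a tick boundary, so it fires on average half a tick late, and aiming TICK_NSEC/2 early re-centers the CMOS update on the half-second mark. The HZ=100 value and the 371 ms starting point below are illustrative only:

	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000L
	#define HZ 100
	#define TICK_NSEC (NSEC_PER_SEC / HZ)

	int main(void)
	{
		long now_nsec = 371000000L;	/* example: 371 ms into the second */
		long next;

		next = (NSEC_PER_SEC / 2) - now_nsec - (TICK_NSEC / 2);
		if (next <= 0)
			next += NSEC_PER_SEC;

		/* adding back the average half-tick latency lands on 500 ms */
		printf("sleep %ld ns, expected firing at ~%ld ns\n",
		       next, now_nsec + next + TICK_NSEC / 2);
		return 0;
	}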
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 31463d370b94..cb01cd8f919b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -175,6 +175,8 @@ static void tick_do_periodic_broadcast(void)
  */
 static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
+	ktime_t next;
+
	tick_do_periodic_broadcast();
 
	/*
@@ -185,10 +187,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 
	/*
	 * Setup the next period for devices, which do not have
-	 * periodic mode:
+	 * periodic mode. We read dev->next_event first and add to it
+	 * when the event already expired. clockevents_program_event()
+	 * sets dev->next_event only when the event is really
+	 * programmed to the device.
	 */
-	for (;;) {
-		ktime_t next = ktime_add(dev->next_event, tick_period);
+	for (next = dev->next_event; ;) {
+		next = ktime_add(next, tick_period);
 
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
@@ -205,7 +210,7 @@ static void tick_do_broadcast_on_off(void *why)
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
-	int cpu;
+	int cpu, bc_stopped;
 
	spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -223,14 +228,16 @@ static void tick_do_broadcast_on_off(void *why)
	if (!tick_device_is_functional(dev))
		goto out;
 
+	bc_stopped = cpus_empty(tick_broadcast_mask);
+
	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
-				clockevents_set_mode(dev,
-						     CLOCK_EVT_MODE_SHUTDOWN);
+			if (tick_broadcast_device.mode ==
+			    TICKDEV_MODE_PERIODIC)
+				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
@@ -239,15 +246,17 @@ static void tick_do_broadcast_on_off(void *why)
		if (!tick_broadcast_force &&
		    cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
+			if (tick_broadcast_device.mode ==
+			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}
 
-	if (cpus_empty(tick_broadcast_mask))
-		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
-	else {
+	if (cpus_empty(tick_broadcast_mask)) {
+		if (!bc_stopped)
+			clockevents_shutdown(bc);
+	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
@@ -298,7 +307,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpus_empty(tick_broadcast_mask))
-			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+			clockevents_shutdown(bc);
	}
 
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -313,7 +322,7 @@ void tick_suspend_broadcast(void)
 
	bc = tick_broadcast_device.evtdev;
	if (bc)
-		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+		clockevents_shutdown(bc);
 
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
@@ -364,16 +373,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void)
 static int tick_broadcast_set_event(ktime_t expires, int force)
 {
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
-	ktime_t now = ktime_get();
-	int res;
-
-	for(;;) {
-		res = clockevents_program_event(bc, expires, now);
-		if (!res || !force)
-			return res;
-		now = ktime_get();
-		expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
-	}
+
+	return tick_dev_program_event(bc, expires, force);
 }
 
 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
@@ -491,14 +492,52 @@ static void tick_broadcast_clear_oneshot(int cpu)
	cpu_clear(cpu, tick_broadcast_oneshot_mask);
 }
 
+static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
+{
+	struct tick_device *td;
+	int cpu;
+
+	for_each_cpu_mask_nr(cpu, *mask) {
+		td = &per_cpu(tick_cpu_device, cpu);
+		if (td->evtdev)
+			td->evtdev->next_event = expires;
+	}
+}
+
 /**
  * tick_broadcast_setup_oneshot - setup the broadcast device
  */
 void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
-	bc->event_handler = tick_handle_oneshot_broadcast;
-	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
-	bc->next_event.tv64 = KTIME_MAX;
+	/* Set it up only once ! */
+	if (bc->event_handler != tick_handle_oneshot_broadcast) {
+		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
+		int cpu = smp_processor_id();
+		cpumask_t mask;
+
+		bc->event_handler = tick_handle_oneshot_broadcast;
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
+		/* Take the do_timer update */
+		tick_do_timer_cpu = cpu;
+
+		/*
+		 * We must be careful here. There might be other CPUs
+		 * waiting for periodic broadcast. We need to set the
+		 * oneshot_mask bits for those and program the
+		 * broadcast device to fire.
+		 */
+		mask = tick_broadcast_mask;
+		cpu_clear(cpu, mask);
+		cpus_or(tick_broadcast_oneshot_mask,
+			tick_broadcast_oneshot_mask, mask);
+
+		if (was_periodic && !cpus_empty(mask)) {
+			tick_broadcast_init_next_event(&mask, tick_next_period);
+			tick_broadcast_set_event(tick_next_period, 1);
+		} else
+			bc->next_event.tv64 = KTIME_MAX;
+	}
 }
 
 /*
@@ -538,4 +577,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+/*
+ * Check, whether the broadcast device is in one shot mode
+ */
+int tick_broadcast_oneshot_active(void)
+{
+	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
+}
+
 #endif
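
The reworked catch-up loop in tick_handle_periodic_broadcast() is easy to get wrong: the device-side next_event is only updated when programming succeeds, so the loop must advance a local copy rather than re-reading the device field. A minimal sketch of that logic, using made-up numbers and a stub standing in for clockevents_program_event():

	#include <stdio.h>

	#define PERIOD 10	/* stand-in for tick_period */

	/* Pretend programming fails while the expiry is not in the future. */
	static int program_event(long *dev_next, long next, long now)
	{
		if (next <= now)
			return -1;	/* already expired */
		*dev_next = next;	/* only updated on success */
		return 0;
	}

	int main(void)
	{
		long dev_next = 100, now = 137, next;

		for (next = dev_next; ;) {
			next += PERIOD;	/* advance the local copy */
			if (!program_event(&dev_next, next, now))
				break;
		}
		printf("caught up: programmed %ld (now=%ld)\n", dev_next, now);
		return 0;
	}

With the old code, re-reading a never-updated dev->next_event each iteration would keep producing the same expired expiry and never terminate.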
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 80c4336f4188..df12434b43ca 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-int tick_do_timer_cpu __read_mostly = -1;
+int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
	if (!tick_device_is_functional(dev))
		return;
 
-	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
+	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td,
	 * If no cpu took the do_timer update, assign it to
	 * this cpu:
	 */
-	if (tick_do_timer_cpu == -1) {
+	if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
		tick_do_timer_cpu = cpu;
		tick_next_period = ktime_get();
		tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
@@ -161,6 +162,7 @@ static void tick_setup_device(struct tick_device *td,
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
+		td->evtdev->event_handler = clockevents_handle_noop;
	}
 
	td->evtdev = newdev;
@@ -248,7 +250,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
-		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
+		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
@@ -299,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup)
	if (*cpup == tick_do_timer_cpu) {
		int cpu = first_cpu(cpu_online_map);
 
-		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
+			TICK_DO_TIMER_NONE;
	}
	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
@@ -310,7 +313,7 @@ static void tick_suspend(void)
	unsigned long flags;
 
	spin_lock_irqsave(&tick_device_lock, flags);
-	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+	clockevents_shutdown(td->evtdev);
	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
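
One subtle piece above is pointing the outgoing device's handler at clockevents_handle_noop before it is replaced, so a straggling interrupt from the old hardware cannot run the real tick handler through a stale pointer. A compact sketch of that ordering; the types and the exchange_device() helper are made-up stand-ins, not the kernel's swap path:

	#include <stdio.h>

	struct event_dev {
		void (*event_handler)(struct event_dev *dev);
	};

	static void handle_noop(struct event_dev *dev) { (void)dev; }

	static void real_tick_handler(struct event_dev *dev)
	{
		(void)dev;
		printf("tick\n");
	}

	static struct event_dev *cur;

	static void exchange_device(struct event_dev *newdev)
	{
		if (cur)
			cur->event_handler = handle_noop;	/* defuse old device */
		newdev->event_handler = real_tick_handler;
		cur = newdev;
	}

	int main(void)
	{
		struct event_dev a = { real_tick_handler }, b = { handle_noop };

		cur = &a;
		exchange_device(&b);
		a.event_handler(&a);	/* straggler IRQ from old device: no-op */
		b.event_handler(&b);	/* new device ticks */
		return 0;
	}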
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index f13f2b7f4fd4..469248782c23 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -1,6 +1,10 @@
 /*
  * tick internal variable and functions used by low/high res code
  */
+
+#define TICK_DO_TIMER_NONE	-1
+#define TICK_DO_TIMER_BOOT	-2
+
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
@@ -10,6 +14,8 @@ extern int tick_do_timer_cpu __read_mostly;
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
 
+extern void clockevents_shutdown(struct clock_event_device *dev);
+
 /*
  * NO_HZ / high resolution timer shared code
  */
@@ -17,6 +23,8 @@ extern void tick_handle_periodic(struct clock_event_device *dev);
 extern void tick_setup_oneshot(struct clock_event_device *newdev,
			       void (*handler)(struct clock_event_device *),
			       ktime_t nextevt);
+extern int tick_dev_program_event(struct clock_event_device *dev,
+				  ktime_t expires, int force);
 extern int tick_program_event(ktime_t expires, int force);
 extern void tick_oneshot_notify(void);
 extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
@@ -27,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+extern int tick_broadcast_oneshot_active(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -35,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -64,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
	return 0;
 }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 #endif /* !TICK_ONESHOT */
 
 /*
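
The two sentinels added above split the old -1 into distinct meanings: TICK_DO_TIMER_BOOT marks a duty that has never been claimed (only the first CPU to set up a tick device takes it), while TICK_DO_TIMER_NONE marks a duty the owner gave up (any CPU's tick handler may adopt it). A sketch of that protocol; setup_tick_device() and tick_handler() here are hypothetical stand-ins for the kernel paths that test each sentinel:

	#include <stdio.h>

	#define TICK_DO_TIMER_NONE	-1
	#define TICK_DO_TIMER_BOOT	-2

	static int tick_do_timer_cpu = TICK_DO_TIMER_BOOT;

	static void setup_tick_device(int cpu)
	{
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT)
			tick_do_timer_cpu = cpu;	/* first CPU claims it */
	}

	static void tick_handler(int cpu)
	{
		if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			tick_do_timer_cpu = cpu;	/* orphaned duty: adopt */
	}

	int main(void)
	{
		setup_tick_device(0);
		printf("after boot: cpu %d\n", tick_do_timer_cpu);
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;	/* owner went idle */
		tick_handler(3);
		printf("after handoff: cpu %d\n", tick_do_timer_cpu);
		return 0;
	}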
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 450c04935b66..2e8de678e767 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -23,24 +23,56 @@
 #include "tick-internal.h"
 
 /**
- * tick_program_event
+ * tick_program_event internal worker function
  */
-int tick_program_event(ktime_t expires, int force)
+int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
+			   int force)
 {
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	ktime_t now = ktime_get();
+	int i;
 
-	while (1) {
+	for (i = 0;;) {
		int ret = clockevents_program_event(dev, expires, now);
 
		if (!ret || !force)
			return ret;
+
+		/*
+		 * We tried 2 times to program the device with the given
+		 * min_delta_ns. If that's not working then we double it
+		 * and emit a warning.
+		 */
+		if (++i > 2) {
+			/* Increase the min. delta and try again */
+			if (!dev->min_delta_ns)
+				dev->min_delta_ns = 5000;
+			else
+				dev->min_delta_ns += dev->min_delta_ns >> 1;
+
+			printk(KERN_WARNING
+			       "CE: %s increasing min_delta_ns to %lu nsec\n",
+			       dev->name ? dev->name : "?",
+			       dev->min_delta_ns << 1);
+
+			i = 0;
+		}
+
		now = ktime_get();
-		expires = ktime_add(now, ktime_set(0, dev->min_delta_ns));
+		expires = ktime_add_ns(now, dev->min_delta_ns);
	}
 }
 
 /**
+ * tick_program_event
+ */
+int tick_program_event(ktime_t expires, int force)
+{
+	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+
+	return tick_dev_program_event(dev, expires, force);
+}
+
+/**
  * tick_resume_onshot - resume oneshot mode
  */
 void tick_resume_oneshot(void)
@@ -61,7 +93,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
 {
	newdev->event_handler = handler;
	clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT);
-	clockevents_program_event(newdev, next_event, ktime_get());
+	tick_dev_program_event(newdev, next_event, 1);
 }
 
 /**
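
The retry policy inside the new tick_dev_program_event() is worth spelling out: up to two failed attempts at the current min_delta_ns are tolerated, then the minimum is raised by 50% (seeded at 5000 ns if it was zero), the counter resets, and the loop tries again until the device accepts a delta. A self-contained userspace sketch of just that policy, with a stub device that rejects deltas under 12 us:

	#include <stdio.h>

	static unsigned long min_delta_ns;

	/* Stand-in device: rejects anything shorter than 12000 ns. */
	static int program(unsigned long delta)
	{
		return delta >= 12000 ? 0 : -1;
	}

	int main(void)
	{
		int i;

		for (i = 0;;) {
			if (!program(min_delta_ns))
				break;
			if (++i > 2) {
				if (!min_delta_ns)
					min_delta_ns = 5000;
				else
					min_delta_ns += min_delta_ns >> 1;
				printf("increasing min_delta_ns to %lu\n",
				       min_delta_ns);
				i = 0;
			}
		}
		printf("settled at %lu ns\n", min_delta_ns);
		return 0;
	}

Starting from zero, the sketch converges 5000 -> 7500 -> 11250 -> 16875 ns, at which point the stub device accepts the delta, mirroring how a flaky clock event device would stop looping forever under the old code.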
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f5da526424a9..cb02324bdb88 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now)
						   incr * ticks);
		}
		do_timer(++ticks);
+
+		/* Keep the tick_next_period variable up to date */
+		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&xtime_lock);
 }
@@ -162,6 +165,8 @@ void tick_nohz_stop_idle(int cpu)
		ts->idle_lastupdate = now;
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_active = 0;
+
+		sched_clock_idle_wakeup_event(0);
	}
 }
 
@@ -177,6 +182,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
	}
	ts->idle_entrytime = now;
	ts->idle_active = 1;
+	sched_clock_idle_sleep_event();
	return now;
 }
 
@@ -218,7 +224,7 @@ void tick_nohz_stop_sched_tick(int inidle)
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}
 
	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -300,7 +306,7 @@ void tick_nohz_stop_sched_tick(int inidle)
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 
		ts->idle_sleeps++;
 
@@ -465,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
 
	/* Check, if the jiffies need an update */
@@ -567,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
 #endif
 
@@ -619,7 +625,7 @@ void tick_setup_sched_timer(void)
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;
-	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
	/* Get the next period (per cpu) */
	ts->sched_timer.expires = tick_init_jiffy_update();
@@ -643,17 +649,21 @@ void tick_setup_sched_timer(void)
	ts->nohz_mode = NOHZ_MODE_HIGHRES;
 #endif
 }
+#endif /* HIGH_RES_TIMERS */
 
+#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
 void tick_cancel_sched_timer(int cpu)
 {
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
+# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
+# endif
 
	ts->nohz_mode = NOHZ_MODE_INACTIVE;
 }
-#endif /* HIGH_RES_TIMERS */
+#endif
 
 /**
  * Async notification about clocksource changes
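
The tick_next_period update added to tick_do_update_jiffies64() keeps the shared period baseline fresh for the broadcast-oneshot setup path above, which now seeds other CPUs' next_event from it. A rough sketch of that bookkeeping, with small made-up units in place of ktime arithmetic:

	#include <stdio.h>

	#define PERIOD 4	/* stand-in for tick_period, arbitrary units */

	static long last_jiffies_update;
	static long tick_next_period;
	static unsigned long jiffies;

	static void do_update_jiffies(long now)
	{
		long delta = now - last_jiffies_update;

		if (delta >= PERIOD) {
			long ticks = delta / PERIOD;

			last_jiffies_update += ticks * PERIOD;
			jiffies += ticks;
			/* keep tick_next_period in sync with the new baseline */
			tick_next_period = last_jiffies_update + PERIOD;
		}
	}

	int main(void)
	{
		do_update_jiffies(23);	/* woke up after ~5 idle periods */
		printf("jiffies=%lu next_period=%ld\n",
		       jiffies, tick_next_period);
		return 0;
	}

Without the refresh, a CPU leaving periodic broadcast could be handed a stale tick_next_period that lies many periods in the past.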
