diff options
Diffstat (limited to 'kernel/time/tick-broadcast.c')
| -rw-r--r-- | kernel/time/tick-broadcast.c | 99 |
1 file changed, 73 insertions(+), 26 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 31463d370b94..cb01cd8f919b 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -175,6 +175,8 @@ static void tick_do_periodic_broadcast(void) | |||
| 175 | */ | 175 | */ |
| 176 | static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | 176 | static void tick_handle_periodic_broadcast(struct clock_event_device *dev) |
| 177 | { | 177 | { |
| 178 | ktime_t next; | ||
| 179 | |||
| 178 | tick_do_periodic_broadcast(); | 180 | tick_do_periodic_broadcast(); |
| 179 | 181 | ||
| 180 | /* | 182 | /* |
| @@ -185,10 +187,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 185 | 187 | ||
| 186 | /* | 188 | /* |
| 187 | * Setup the next period for devices, which do not have | 189 | * Setup the next period for devices, which do not have |
| 188 | * periodic mode: | 190 | * periodic mode. We read dev->next_event first and add to it |
| 191 | * when the event already expired. clockevents_program_event() | ||
| 192 | * sets dev->next_event only when the event is really | ||
| 193 | * programmed to the device. | ||
| 189 | */ | 194 | */ |
| 190 | for (;;) { | 195 | for (next = dev->next_event; ;) { |
| 191 | ktime_t next = ktime_add(dev->next_event, tick_period); | 196 | next = ktime_add(next, tick_period); |
| 192 | 197 | ||
| 193 | if (!clockevents_program_event(dev, next, ktime_get())) | 198 | if (!clockevents_program_event(dev, next, ktime_get())) |
| 194 | return; | 199 | return; |
| @@ -205,7 +210,7 @@ static void tick_do_broadcast_on_off(void *why) | |||
| 205 | struct clock_event_device *bc, *dev; | 210 | struct clock_event_device *bc, *dev; |
| 206 | struct tick_device *td; | 211 | struct tick_device *td; |
| 207 | unsigned long flags, *reason = why; | 212 | unsigned long flags, *reason = why; |
| 208 | int cpu; | 213 | int cpu, bc_stopped; |
| 209 | 214 | ||
| 210 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 215 | spin_lock_irqsave(&tick_broadcast_lock, flags); |
| 211 | 216 | ||
| @@ -223,14 +228,16 @@ static void tick_do_broadcast_on_off(void *why) | |||
| 223 | if (!tick_device_is_functional(dev)) | 228 | if (!tick_device_is_functional(dev)) |
| 224 | goto out; | 229 | goto out; |
| 225 | 230 | ||
| 231 | bc_stopped = cpus_empty(tick_broadcast_mask); | ||
| 232 | |||
| 226 | switch (*reason) { | 233 | switch (*reason) { |
| 227 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: | 234 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: |
| 228 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | 235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: |
| 229 | if (!cpu_isset(cpu, tick_broadcast_mask)) { | 236 | if (!cpu_isset(cpu, tick_broadcast_mask)) { |
| 230 | cpu_set(cpu, tick_broadcast_mask); | 237 | cpu_set(cpu, tick_broadcast_mask); |
| 231 | if (td->mode == TICKDEV_MODE_PERIODIC) | 238 | if (tick_broadcast_device.mode == |
| 232 | clockevents_set_mode(dev, | 239 | TICKDEV_MODE_PERIODIC) |
| 233 | CLOCK_EVT_MODE_SHUTDOWN); | 240 | clockevents_shutdown(dev); |
| 234 | } | 241 | } |
| 235 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) | 242 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) |
| 236 | tick_broadcast_force = 1; | 243 | tick_broadcast_force = 1; |
| @@ -239,15 +246,17 @@ static void tick_do_broadcast_on_off(void *why) | |||
| 239 | if (!tick_broadcast_force && | 246 | if (!tick_broadcast_force && |
| 240 | cpu_isset(cpu, tick_broadcast_mask)) { | 247 | cpu_isset(cpu, tick_broadcast_mask)) { |
| 241 | cpu_clear(cpu, tick_broadcast_mask); | 248 | cpu_clear(cpu, tick_broadcast_mask); |
| 242 | if (td->mode == TICKDEV_MODE_PERIODIC) | 249 | if (tick_broadcast_device.mode == |
| 250 | TICKDEV_MODE_PERIODIC) | ||
| 243 | tick_setup_periodic(dev, 0); | 251 | tick_setup_periodic(dev, 0); |
| 244 | } | 252 | } |
| 245 | break; | 253 | break; |
| 246 | } | 254 | } |
| 247 | 255 | ||
| 248 | if (cpus_empty(tick_broadcast_mask)) | 256 | if (cpus_empty(tick_broadcast_mask)) { |
| 249 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 257 | if (!bc_stopped) |
| 250 | else { | 258 | clockevents_shutdown(bc); |
| 259 | } else if (bc_stopped) { | ||
| 251 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | 260 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) |
| 252 | tick_broadcast_start_periodic(bc); | 261 | tick_broadcast_start_periodic(bc); |
| 253 | else | 262 | else |
| @@ -298,7 +307,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) | |||
| 298 | 307 | ||
| 299 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { | 308 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { |
| 300 | if (bc && cpus_empty(tick_broadcast_mask)) | 309 | if (bc && cpus_empty(tick_broadcast_mask)) |
| 301 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 310 | clockevents_shutdown(bc); |
| 302 | } | 311 | } |
| 303 | 312 | ||
| 304 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 313 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| @@ -313,7 +322,7 @@ void tick_suspend_broadcast(void) | |||
| 313 | 322 | ||
| 314 | bc = tick_broadcast_device.evtdev; | 323 | bc = tick_broadcast_device.evtdev; |
| 315 | if (bc) | 324 | if (bc) |
| 316 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 325 | clockevents_shutdown(bc); |
| 317 | 326 | ||
| 318 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 327 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 319 | } | 328 | } |
| @@ -364,16 +373,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void) | |||
| 364 | static int tick_broadcast_set_event(ktime_t expires, int force) | 373 | static int tick_broadcast_set_event(ktime_t expires, int force) |
| 365 | { | 374 | { |
| 366 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | 375 | struct clock_event_device *bc = tick_broadcast_device.evtdev; |
| 367 | ktime_t now = ktime_get(); | 376 | |
| 368 | int res; | 377 | return tick_dev_program_event(bc, expires, force); |
| 369 | |||
| 370 | for(;;) { | ||
| 371 | res = clockevents_program_event(bc, expires, now); | ||
| 372 | if (!res || !force) | ||
| 373 | return res; | ||
| 374 | now = ktime_get(); | ||
| 375 | expires = ktime_add(now, ktime_set(0, bc->min_delta_ns)); | ||
| 376 | } | ||
| 377 | } | 378 | } |
| 378 | 379 | ||
| 379 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | 380 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) |
| @@ -491,14 +492,52 @@ static void tick_broadcast_clear_oneshot(int cpu) | |||
| 491 | cpu_clear(cpu, tick_broadcast_oneshot_mask); | 492 | cpu_clear(cpu, tick_broadcast_oneshot_mask); |
| 492 | } | 493 | } |
| 493 | 494 | ||
| 495 | static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires) | ||
| 496 | { | ||
| 497 | struct tick_device *td; | ||
| 498 | int cpu; | ||
| 499 | |||
| 500 | for_each_cpu_mask_nr(cpu, *mask) { | ||
| 501 | td = &per_cpu(tick_cpu_device, cpu); | ||
| 502 | if (td->evtdev) | ||
| 503 | td->evtdev->next_event = expires; | ||
| 504 | } | ||
| 505 | } | ||
| 506 | |||
| 494 | /** | 507 | /** |
| 495 | * tick_broadcast_setup_oneshot - setup the broadcast device | 508 | * tick_broadcast_setup_oneshot - setup the broadcast device |
| 496 | */ | 509 | */ |
| 497 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 510 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
| 498 | { | 511 | { |
| 499 | bc->event_handler = tick_handle_oneshot_broadcast; | 512 | /* Set it up only once ! */ |
| 500 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | 513 | if (bc->event_handler != tick_handle_oneshot_broadcast) { |
| 501 | bc->next_event.tv64 = KTIME_MAX; | 514 | int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; |
| 515 | int cpu = smp_processor_id(); | ||
| 516 | cpumask_t mask; | ||
| 517 | |||
| 518 | bc->event_handler = tick_handle_oneshot_broadcast; | ||
| 519 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | ||
| 520 | |||
| 521 | /* Take the do_timer update */ | ||
| 522 | tick_do_timer_cpu = cpu; | ||
| 523 | |||
| 524 | /* | ||
| 525 | * We must be careful here. There might be other CPUs | ||
| 526 | * waiting for periodic broadcast. We need to set the | ||
| 527 | * oneshot_mask bits for those and program the | ||
| 528 | * broadcast device to fire. | ||
| 529 | */ | ||
| 530 | mask = tick_broadcast_mask; | ||
| 531 | cpu_clear(cpu, mask); | ||
| 532 | cpus_or(tick_broadcast_oneshot_mask, | ||
| 533 | tick_broadcast_oneshot_mask, mask); | ||
| 534 | |||
| 535 | if (was_periodic && !cpus_empty(mask)) { | ||
| 536 | tick_broadcast_init_next_event(&mask, tick_next_period); | ||
| 537 | tick_broadcast_set_event(tick_next_period, 1); | ||
| 538 | } else | ||
| 539 | bc->next_event.tv64 = KTIME_MAX; | ||
| 540 | } | ||
| 502 | } | 541 | } |
| 503 | 542 | ||
| 504 | /* | 543 | /* |
| @@ -538,4 +577,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
| 538 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 577 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 539 | } | 578 | } |
| 540 | 579 | ||
| 580 | /* | ||
| 581 | * Check whether the broadcast device is in one shot mode | ||
| 582 | */ | ||
| 583 | int tick_broadcast_oneshot_active(void) | ||
| 584 | { | ||
| 585 | return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; | ||
| 586 | } | ||
| 587 | |||
| 541 | #endif | 588 | #endif |
