author		Thomas Gleixner <tglx@linutronix.de>	2007-12-18 12:05:58 -0500
committer	Ingo Molnar <mingo@elte.hu>	2007-12-18 12:05:58 -0500
commit		cdc6f27d9e3c2f7ca1a3e19c6eabb1ad6a2add5d
tree		d557b594cd5d180e1ce91cac25a29d81a42499a5 /kernel/time
parent		bd87f1f028ddaad45d4a9a3621dfe688c840ba41
clockevents: fix reprogramming decision in oneshot broadcast
Resolve the following regression, which left a laptop choppy and almost unusable:
http://lkml.org/lkml/2007/12/7/299
http://bugzilla.kernel.org/show_bug.cgi?id=9525
An earlier version of this code reprogrammed the broadcast device in the
return-from-idle path. That reprogramming was removed, but the logic in
tick_handle_oneshot_broadcast() was left unchanged.
When a broadcast interrupt happens, we signal the expiry to all CPUs
which have an expired event. If none of the CPUs has an expired event,
which can happen in dyntick mode, then we reprogram the broadcast
device. Otherwise we do not reprogram it, but that is only correct if
all CPUs which are in the idle broadcast state have been woken up.
The code ignores that there might be pending, not yet expired events on
other CPUs which are in the idle broadcast state, so the delivery of
those events can be delayed for quite some time.
Change tick_handle_oneshot_broadcast() to check for CPUs which are in
broadcast state but are not woken up by the current event, and enforce
rearming of the broadcast device for those CPUs.
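For illustration, here is a minimal, standalone C model of the fixed decision
logic. The names (next_event_of, broadcast_wake, handle_oneshot_broadcast) and
the array of per-CPU expiry times are simplified stand-ins for the kernel's
per-CPU tick devices, not kernel code. It shows the single scan that both wakes
the expired CPUs and tracks the earliest still-pending expiry for rearming:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS   4
#define KTIME_MAX INT64_MAX

/* Per-CPU next expiry in ns; stand-in for each tick device's next_event. */
static int64_t next_event_of[NR_CPUS] = { 100, 250, 400, 900 };

static void broadcast_wake(int cpu)
{
	printf("wake cpu %d\n", cpu);
}

/*
 * Model of the fixed decision: wake every CPU whose event has expired
 * and return the earliest still-pending expiry, so the broadcast device
 * is rearmed even when some CPUs were woken by this event.
 */
static int64_t handle_oneshot_broadcast(int64_t now)
{
	int64_t next_event = KTIME_MAX;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (next_event_of[cpu] <= now)
			broadcast_wake(cpu);             /* expired: deliver */
		else if (next_event_of[cpu] < next_event)
			next_event = next_event_of[cpu]; /* pending: track earliest */
	}
	return next_event; /* KTIME_MAX means nothing left to program */
}

int main(void)
{
	/*
	 * At now == 250, CPUs 0 and 1 are woken. Before the fix the
	 * device was only reprogrammed when *no* CPU had expired, so
	 * the pending events of CPUs 2 and 3 could be delayed for a
	 * long time; the fix rearms to the earliest pending expiry
	 * (here 400) regardless of the wakeups.
	 */
	printf("rearm at %lld\n",
	       (long long)handle_oneshot_broadcast(250));
	return 0;
}

The kernel code below performs the same scan under tick_broadcast_lock and
retries (goto again) if the rearm target has already expired by the time
tick_broadcast_set_event() runs.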
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/tick-broadcast.c	56
1 file changed, 21 insertions(+), 35 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index aa82d7bf478a..5b86698faa0b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,45 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 }
 
 /*
- * Reprogram the broadcast device:
- *
- * Called with tick_broadcast_lock held and interrupts disabled.
- */
-static int tick_broadcast_reprogram(void)
-{
-	ktime_t expires = { .tv64 = KTIME_MAX };
-	struct tick_device *td;
-	int cpu;
-
-	/*
-	 * Find the event which expires next:
-	 */
-	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
-	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
-		td = &per_cpu(tick_cpu_device, cpu);
-		if (td->evtdev->next_event.tv64 < expires.tv64)
-			expires = td->evtdev->next_event;
-	}
-
-	if (expires.tv64 == KTIME_MAX)
-		return 0;
-
-	return tick_broadcast_set_event(expires, 0);
-}
-
-/*
  * Handle oneshot mode broadcasting
  */
 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 {
 	struct tick_device *td;
 	cpumask_t mask;
-	ktime_t now;
+	ktime_t now, next_event;
 	int cpu;
 
 	spin_lock(&tick_broadcast_lock);
 again:
 	dev->next_event.tv64 = KTIME_MAX;
+	next_event.tv64 = KTIME_MAX;
 	mask = CPU_MASK_NONE;
 	now = ktime_get();
 	/* Find all expired events */
@@ -431,19 +405,31 @@ again:
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64)
 			cpu_set(cpu, mask);
+		else if (td->evtdev->next_event.tv64 < next_event.tv64)
+			next_event.tv64 = td->evtdev->next_event.tv64;
 	}
 
 	/*
-	 * Wakeup the cpus which have an expired event. The broadcast
-	 * device is reprogrammed in the return from idle code.
+	 * Wakeup the cpus which have an expired event.
+	 */
+	tick_do_broadcast(mask);
+
+	/*
+	 * Two reasons for reprogram:
+	 *
+	 * - The global event did not expire any CPU local
+	 *   events. This happens in dyntick mode, as the maximum PIT
+	 *   delta is quite small.
+	 *
+	 * - There are pending events on sleeping CPUs which were not
+	 *   in the event mask
 	 */
-	if (!tick_do_broadcast(mask)) {
+	if (next_event.tv64 != KTIME_MAX) {
 		/*
-		 * The global event did not expire any CPU local
-		 * events. This happens in dyntick mode, as the
-		 * maximum PIT delta is quite small.
+		 * Rearm the broadcast device. If event expired,
+		 * repeat the above
 		 */
-		if (tick_broadcast_reprogram())
+		if (tick_broadcast_set_event(next_event, 0))
 			goto again;
 	}
 	spin_unlock(&tick_broadcast_lock);