Diffstat (limited to 'kernel/time/tick-broadcast.c')
-rw-r--r--	kernel/time/tick-broadcast.c	85
1 file changed, 74 insertions, 11 deletions
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 98977a57ac72..64c5990fd500 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -120,6 +120,19 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
 	return (dev && tick_broadcast_device.evtdev == dev);
 }
 
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
+{
+	int ret = -ENODEV;
+
+	if (tick_is_broadcast_device(dev)) {
+		raw_spin_lock(&tick_broadcast_lock);
+		ret = __clockevents_update_freq(dev, freq);
+		raw_spin_unlock(&tick_broadcast_lock);
+	}
+	return ret;
+}
+
+
 static void err_broadcast(const struct cpumask *mask)
 {
 	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
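Note on the hunk above: tick_broadcast_update_freq() applies a frequency change to the broadcast device under tick_broadcast_lock, and reports -ENODEV for any other device so the caller can fall back to the per-CPU update path. A minimal userspace sketch of that dispatch logic, with all kernel types and locking replaced by stubs (nothing below is kernel code):

#include <stdio.h>

#define ENODEV 19	/* stand-in errno value */

struct clock_event_device { unsigned int freq; };

static struct clock_event_device broadcast_dev;

/* stand-in for tick_is_broadcast_device() */
static int is_broadcast_device(struct clock_event_device *dev)
{
	return dev == &broadcast_dev;
}

/* models tick_broadcast_update_freq(): update if and only if the
 * device is the broadcast device */
static int broadcast_update_freq(struct clock_event_device *dev, unsigned int freq)
{
	int ret = -ENODEV;

	if (is_broadcast_device(dev)) {
		/* the kernel takes tick_broadcast_lock around this */
		dev->freq = freq;
		ret = 0;
	}
	return ret;
}

int main(void)
{
	struct clock_event_device percpu_dev = { 0 };

	/* callers fall back to the per-CPU update path on -ENODEV */
	printf("broadcast dev: %d\n", broadcast_update_freq(&broadcast_dev, 24000000));
	printf("per-cpu dev:   %d\n", broadcast_update_freq(&percpu_dev, 24000000));
	return 0;
}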
@@ -272,12 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask)
  */
 static void tick_do_periodic_broadcast(void)
 {
-	raw_spin_lock(&tick_broadcast_lock);
-
 	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
 	tick_do_broadcast(tmpmask);
-
-	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -287,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
 	ktime_t next;
 
+	raw_spin_lock(&tick_broadcast_lock);
+
 	tick_do_periodic_broadcast();
 
 	/*
 	 * The device is in periodic mode. No reprogramming necessary:
 	 */
 	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
-		return;
+		goto unlock;
 
 	/*
 	 * Setup the next period for devices, which do not have
@@ -306,9 +317,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 		next = ktime_add(next, tick_period);
 
 		if (!clockevents_program_event(dev, next, false))
-			return;
+			goto unlock;
 		tick_do_periodic_broadcast();
 	}
+unlock:
+	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
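Note on the three hunks above: the lock is hoisted out of tick_do_periodic_broadcast() into its caller tick_handle_periodic_broadcast(), so the broadcast and any reprogramming of the next period happen in one critical section; the early returns consequently become goto unlock. A minimal userspace sketch of this single-exit locking shape, using a pthread spinlock as a stand-in (the retry loop of the real handler is collapsed into one attempt):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t broadcast_lock;	/* stand-in for tick_broadcast_lock */

/* stand-in for tick_do_periodic_broadcast(), now called with the lock held */
static void do_periodic_broadcast(void)
{
	printf("broadcast tick\n");
}

static void handle_periodic_broadcast(bool periodic_mode, bool program_succeeds)
{
	pthread_spin_lock(&broadcast_lock);

	do_periodic_broadcast();

	if (periodic_mode)
		goto unlock;	/* periodic hardware: no reprogramming necessary */

	if (program_succeeds)
		goto unlock;	/* next period armed */

	do_periodic_broadcast();	/* event already expired: broadcast again */
unlock:
	pthread_spin_unlock(&broadcast_lock);
}

int main(void)
{
	pthread_spin_init(&broadcast_lock, PTHREAD_PROCESS_PRIVATE);
	handle_periodic_broadcast(true, false);
	handle_periodic_broadcast(false, true);
	pthread_spin_destroy(&broadcast_lock);
	return 0;
}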
@@ -630,24 +643,61 @@ again:
 	raw_spin_unlock(&tick_broadcast_lock);
 }
 
+static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
+{
+	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+		return 0;
+	if (bc->next_event.tv64 == KTIME_MAX)
+		return 0;
+	return bc->bound_on == cpu ? -EBUSY : 0;
+}
+
+static void broadcast_shutdown_local(struct clock_event_device *bc,
+				     struct clock_event_device *dev)
+{
+	/*
+	 * For hrtimer based broadcasting we cannot shutdown the cpu
+	 * local device if our own event is the first one to expire or
+	 * if we own the broadcast timer.
+	 */
+	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
+		if (broadcast_needs_cpu(bc, smp_processor_id()))
+			return;
+		if (dev->next_event.tv64 < bc->next_event.tv64)
+			return;
+	}
+	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+}
+
+static void broadcast_move_bc(int deadcpu)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	if (!bc || !broadcast_needs_cpu(bc, deadcpu))
+		return;
+	/* This moves the broadcast assignment to this cpu */
+	clockevents_program_event(bc, bc->next_event, 1);
+}
+
 /*
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
  */
-void tick_broadcast_oneshot_control(unsigned long reason)
+int tick_broadcast_oneshot_control(unsigned long reason)
 {
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
 	unsigned long flags;
 	ktime_t now;
-	int cpu;
+	int cpu, ret = 0;
 
 	/*
 	 * Periodic mode does not care about the enter/exit of power
 	 * states
 	 */
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-		return;
+		return 0;
 
 	/*
 	 * We are called with preemtion disabled from the depth of the
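Note on the hunk above: broadcast_needs_cpu() is the heart of the hrtimer-based broadcast support. A CPU is refused deep idle (-EBUSY) only when the broadcast device is the hrtimer emulation (CLOCK_EVT_FEAT_HRTIMER), it has an armed expiry, and it is bound to that CPU, since that CPU's local timer is what actually fires the broadcast. A self-contained userspace model of the decision, with stand-in types and constants (not kernel code):

#include <stdint.h>
#include <stdio.h>

#define EBUSY			16	/* stand-in errno value */
#define CLOCK_EVT_FEAT_HRTIMER	0x1	/* stand-in feature bit */
#define KTIME_MAX		INT64_MAX

struct clock_event_device {
	unsigned int features;
	int64_t next_event;	/* models bc->next_event.tv64 */
	int bound_on;		/* CPU whose hrtimer emulates the broadcast */
};

static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;	/* real broadcast hardware: every CPU may sleep */
	if (bc->next_event == KTIME_MAX)
		return 0;	/* nothing armed: every CPU may sleep */
	return bc->bound_on == cpu ? -EBUSY : 0;
}

int main(void)
{
	struct clock_event_device bc = { CLOCK_EVT_FEAT_HRTIMER, 1000, 2 };

	printf("cpu 2: %d\n", broadcast_needs_cpu(&bc, 2));	/* -EBUSY: owns the broadcast */
	printf("cpu 3: %d\n", broadcast_needs_cpu(&bc, 3));	/* 0: may enter deep idle */
	return 0;
}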
@@ -658,7 +708,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	dev = td->evtdev;
 
 	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
-		return;
+		return 0;
 
 	bc = tick_broadcast_device.evtdev;
 
@@ -666,7 +716,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
-			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+			broadcast_shutdown_local(bc, dev);
 			/*
 			 * We only reprogram the broadcast timer if we
 			 * did not mark ourself in the force mask and
@@ -679,6 +729,16 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 			    dev->next_event.tv64 < bc->next_event.tv64)
 				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
 		}
+		/*
+		 * If the current CPU owns the hrtimer broadcast
+		 * mechanism, it cannot go deep idle and we remove the
+		 * CPU from the broadcast mask. We don't have to go
+		 * through the EXIT path as the local timer is not
+		 * shutdown.
+		 */
+		ret = broadcast_needs_cpu(bc, cpu);
+		if (ret)
+			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
@@ -746,6 +806,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	}
 out:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+	return ret;
 }
 
 /*
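Note on the hunks above: tick_broadcast_oneshot_control() now reports -EBUSY on BROADCAST_ENTER when the caller owns the hrtimer broadcast, and the idle path has to honor that by staying in a shallow C-state. A sketch of that caller contract with stand-in function names (the real idle-entry plumbing is not part of this diff):

#include <stdio.h>

#define EBUSY 16	/* stand-in errno value */

/* stand-in for tick_broadcast_oneshot_control(CLOCK_EVT_NOTIFY_BROADCAST_ENTER):
 * refuses the CPU that currently owns the hrtimer broadcast */
static int broadcast_enter(int cpu, int broadcast_owner)
{
	return cpu == broadcast_owner ? -EBUSY : 0;
}

static void enter_idle(int cpu, int broadcast_owner)
{
	if (broadcast_enter(cpu, broadcast_owner)) {
		printf("cpu %d: owns the broadcast, shallow idle only\n", cpu);
		return;		/* local timer keeps running; no EXIT needed */
	}
	printf("cpu %d: local timer shut down, deep idle\n", cpu);
	/* ...and BROADCAST_EXIT on wakeup restores the local timer */
}

int main(void)
{
	enter_idle(2, 2);
	enter_idle(3, 2);
	return 0;
}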
@@ -852,6 +913,8 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
 
+	broadcast_move_bc(cpu);
+
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
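Note on the hunk above: broadcast_move_bc() handles the case where the CPU going offline owns the hrtimer broadcast. Reprogramming the unchanged bc->next_event from the teardown context rebinds the emulated device to the CPU executing tick_shutdown_broadcast_oneshot(). A toy userspace model of that ownership move, with stand-in types (not kernel code):

#include <stdio.h>

struct bc_model {
	long long next_event;	/* pending expiry, unchanged by the move */
	int bound_on;		/* CPU whose hrtimer emulates the broadcast */
};

/* stand-in for clockevents_program_event() on the hrtimer broadcast: the
 * hrtimer is restarted on, and thereby bound to, the calling CPU */
static void program_event(struct bc_model *bc, long long expires, int this_cpu)
{
	bc->next_event = expires;
	bc->bound_on = this_cpu;
}

int main(void)
{
	struct bc_model bc = { 1000, 2 };
	int dead_cpu = 2, this_cpu = 0;

	/* CPU 2 goes offline; the teardown runs on CPU 0 and takes over */
	if (bc.bound_on == dead_cpu)
		program_event(&bc, bc.next_event, this_cpu);

	printf("broadcast now bound to cpu %d\n", bc.bound_on);
	return 0;
}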
