Diffstat (limited to 'kernel/time/tick-broadcast.c')
-rw-r--r--   kernel/time/tick-broadcast.c | 129
1 file changed, 107 insertions, 22 deletions
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 20d6fba70652..218bcb565fed 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -19,6 +19,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/module.h>

 #include "tick-internal.h"

@@ -29,6 +30,7 @@

 static struct tick_device tick_broadcast_device;
 static cpumask_var_t tick_broadcast_mask;
+static cpumask_var_t tick_broadcast_on;
 static cpumask_var_t tmpmask;
 static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 static int tick_broadcast_force;
@@ -64,17 +66,34 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
 /*
  * Check, if the device can be utilized as broadcast device:
  */
-int tick_check_broadcast_device(struct clock_event_device *dev)
+static bool tick_check_broadcast_device(struct clock_event_device *curdev,
+                                        struct clock_event_device *newdev)
+{
+        if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+            (newdev->features & CLOCK_EVT_FEAT_C3STOP))
+                return false;
+
+        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
+            !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
+                return false;
+
+        return !curdev || newdev->rating > curdev->rating;
+}
+
+/*
+ * Conditionally install/replace broadcast device
+ */
+void tick_install_broadcast_device(struct clock_event_device *dev)
 {
         struct clock_event_device *cur = tick_broadcast_device.evtdev;

-        if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
-            (tick_broadcast_device.evtdev &&
-             tick_broadcast_device.evtdev->rating >= dev->rating) ||
-             (dev->features & CLOCK_EVT_FEAT_C3STOP))
-                return 0;
+        if (!tick_check_broadcast_device(cur, dev))
+                return;
+
+        if (!try_module_get(dev->owner))
+                return;

-        clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
+        clockevents_exchange_device(cur, dev);
         if (cur)
                 cur->event_handler = clockevents_handle_noop;
         tick_broadcast_device.evtdev = dev;
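
Note on the try_module_get() call added above: it only has an effect if the driver that registered the clockevent device also filled in the new ->owner field. A minimal driver-side sketch, assuming a hypothetical modular timer driver (the name, rating value and callbacks are made up; .name, .features, .rating and .owner are real fields of struct clock_event_device):

#include <linux/clockchips.h>
#include <linux/module.h>

/* hypothetical modular timer driver */
static struct clock_event_device foo_timer_clockevent = {
        .name           = "foo-timer",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 300,          /* must exceed the current broadcast device's rating */
        .owner          = THIS_MODULE,  /* lets the core pin the module via try_module_get() */
        /* .set_mode and .set_next_event callbacks omitted in this sketch */
};

With the module pinned while its device is installed as the broadcast device, the driver cannot be unloaded underneath the tick core.
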
@@ -90,7 +109,6 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
          */
         if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
                 tick_clock_notify();
-        return 1;
 }

 /*
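
As a rough, stand-alone illustration of the selection rule factored out above (plain user-space C with made-up flag values and a mock struct, not the kernel types): dummy and C3STOP devices are rejected, oneshot capability is required once the broadcast device already runs in oneshot mode, and otherwise a strictly higher rating wins.

#include <stdbool.h>
#include <stdio.h>

#define FEAT_DUMMY      0x1     /* stand-ins for the CLOCK_EVT_FEAT_* flags */
#define FEAT_C3STOP     0x2
#define FEAT_ONESHOT    0x4

struct mock_dev {
        unsigned int features;
        int rating;
};

/* mirrors the logic of tick_check_broadcast_device() */
static bool check_broadcast_device(const struct mock_dev *curdev,
                                   const struct mock_dev *newdev,
                                   bool oneshot_mode)
{
        if ((newdev->features & FEAT_DUMMY) || (newdev->features & FEAT_C3STOP))
                return false;
        if (oneshot_mode && !(newdev->features & FEAT_ONESHOT))
                return false;
        return !curdev || newdev->rating > curdev->rating;
}

int main(void)
{
        struct mock_dev cur  = { .features = FEAT_ONESHOT, .rating = 100 };
        struct mock_dev cand = { .features = FEAT_ONESHOT, .rating = 150 };

        printf("%d\n", check_broadcast_device(&cur, &cand, true));     /* 1: higher rating wins */
        cand.features |= FEAT_C3STOP;
        printf("%d\n", check_broadcast_device(&cur, &cand, true));     /* 0: device stops in deep idle */
        return 0;
}
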
@@ -123,8 +141,9 @@ static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
  */
 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 {
+        struct clock_event_device *bc = tick_broadcast_device.evtdev;
         unsigned long flags;
-        int ret = 0;
+        int ret;

         raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

@@ -138,20 +157,62 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
                 dev->event_handler = tick_handle_periodic;
                 tick_device_setup_broadcast_func(dev);
                 cpumask_set_cpu(cpu, tick_broadcast_mask);
-                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
+                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+                        tick_broadcast_start_periodic(bc);
+                else
+                        tick_broadcast_setup_oneshot(bc);
                 ret = 1;
         } else {
                 /*
-                 * When the new device is not affected by the stop
-                 * feature and the cpu is marked in the broadcast mask
-                 * then clear the broadcast bit.
+                 * Clear the broadcast bit for this cpu if the
+                 * device is not power state affected.
                  */
-                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
-                        int cpu = smp_processor_id();
+                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                         cpumask_clear_cpu(cpu, tick_broadcast_mask);
-                        tick_broadcast_clear_oneshot(cpu);
-                } else {
+                else
                         tick_device_setup_broadcast_func(dev);
+
+                /*
+                 * Clear the broadcast bit if the CPU is not in
+                 * periodic broadcast on state.
+                 */
+                if (!cpumask_test_cpu(cpu, tick_broadcast_on))
+                        cpumask_clear_cpu(cpu, tick_broadcast_mask);
+
+                switch (tick_broadcast_device.mode) {
+                case TICKDEV_MODE_ONESHOT:
+                        /*
+                         * If the system is in oneshot mode we can
+                         * unconditionally clear the oneshot mask bit,
+                         * because the CPU is running and therefore
+                         * not in an idle state which causes the power
+                         * state affected device to stop. Let the
+                         * caller initialize the device.
+                         */
+                        tick_broadcast_clear_oneshot(cpu);
+                        ret = 0;
+                        break;
+
+                case TICKDEV_MODE_PERIODIC:
+                        /*
+                         * If the system is in periodic mode, check
+                         * whether the broadcast device can be
+                         * switched off now.
+                         */
+                        if (cpumask_empty(tick_broadcast_mask) && bc)
+                                clockevents_shutdown(bc);
+                        /*
+                         * If we kept the cpu in the broadcast mask,
+                         * tell the caller to leave the per cpu device
+                         * in shutdown state. The periodic interrupt
+                         * is delivered by the broadcast device.
+                         */
+                        ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+                        break;
+                default:
+                        /* Nothing to do */
+                        ret = 0;
+                        break;
                 }
         }
         raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
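
The rewrite above changes what tick_device_uses_broadcast() reports to its caller. A simplified stand-alone mock of just the return value (plain C, made-up booleans in place of the cpumask bookkeeping; the mask updates and the broadcast device shutdown are deliberately left out):

#include <stdbool.h>

enum broadcast_mode { MODE_PERIODIC, MODE_ONESHOT };

/*
 * 1: keep the per-cpu device shut down, the broadcast device delivers the
 *    periodic tick for this CPU.
 * 0: the caller may go ahead and program the per-cpu device.
 */
static int uses_broadcast(bool dev_functional, bool cpu_in_broadcast_mask,
                          enum broadcast_mode mode)
{
        if (!dev_functional)
                return 1;       /* dummy device: this CPU must rely on the broadcast */
        if (mode == MODE_ONESHOT)
                return 0;       /* CPU is running, let the caller set the device up */
        return cpu_in_broadcast_mask ? 1 : 0;   /* periodic: still covered by broadcast? */
}

In the real function the periodic-mode answer is taken after the mask updates, so cpu_in_broadcast_mask here stands for the state of tick_broadcast_mask at that point.
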
@@ -281,6 +342,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
         switch (*reason) {
         case CLOCK_EVT_NOTIFY_BROADCAST_ON:
         case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
+                cpumask_set_cpu(cpu, tick_broadcast_on);
                 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
                         if (tick_broadcast_device.mode ==
                             TICKDEV_MODE_PERIODIC)
@@ -290,8 +352,12 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
                 tick_broadcast_force = 1;
                 break;
         case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-                if (!tick_broadcast_force &&
-                    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
+                if (tick_broadcast_force)
+                        break;
+                cpumask_clear_cpu(cpu, tick_broadcast_on);
+                if (!tick_device_is_functional(dev))
+                        break;
+                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
                         if (tick_broadcast_device.mode ==
                             TICKDEV_MODE_PERIODIC)
                                 tick_setup_periodic(dev, 0);
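
For context, the BROADCAST_ON/OFF/FORCE reasons handled here are normally generated through clockevents_notify() by code that knows the per-cpu timer stops in deep idle states. A hedged sketch of such a caller (the helper function is hypothetical; clockevents_notify() and the notify reason are the existing API):

#include <linux/clockchips.h>
#include <linux/smp.h>

/* hypothetical helper in an idle/timer driver whose local timer has C3STOP */
static void foo_switch_to_broadcast(void)
{
        int cpu = smp_processor_id();

        /* ends up in tick_do_broadcast_on_off() above */
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}
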
@@ -349,6 +415,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)

         bc = tick_broadcast_device.evtdev;
         cpumask_clear_cpu(cpu, tick_broadcast_mask);
+        cpumask_clear_cpu(cpu, tick_broadcast_on);

         if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                 if (bc && cpumask_empty(tick_broadcast_mask))
@@ -475,7 +542,15 @@ void tick_check_oneshot_broadcast(int cpu)
         if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
                 struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

-                clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+                /*
+                 * We might be in the middle of switching over from
+                 * periodic to oneshot. If the CPU has not yet
+                 * switched over, leave the device alone.
+                 */
+                if (td->mode == TICKDEV_MODE_ONESHOT) {
+                        clockevents_set_mode(td->evtdev,
+                                             CLOCK_EVT_MODE_ONESHOT);
+                }
         }
 }

@@ -522,6 +597,13 @@ again:
         cpumask_clear(tick_broadcast_force_mask);

         /*
+         * Sanity check. Catch the case where we try to broadcast to
+         * offline cpus.
+         */
+        if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
+                cpumask_and(tmpmask, tmpmask, cpu_online_mask);
+
+        /*
          * Wakeup the cpus which have an expired event.
          */
         tick_do_broadcast(tmpmask);
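
The new sanity check asks whether tmpmask contains any CPU that is not online and, if so, warns once and clamps the mask. A tiny stand-alone illustration of the same subset-and-clamp idea with plain bitmasks (not the kernel cpumask API):

#include <stdio.h>

int main(void)
{
        unsigned long expired = 0x1c;   /* CPUs 2, 3 and 4 have an expired event */
        unsigned long online  = 0x0f;   /* only CPUs 0-3 are online */

        if (expired & ~online) {        /* cpumask_subset() would be false */
                fprintf(stderr, "warning: broadcast aimed at offline cpu(s)\n");
                expired &= online;      /* cpumask_and(): drop offline CPUs */
        }
        printf("broadcast mask: %#lx\n", expired);      /* prints 0xc */
        return 0;
}
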
@@ -761,10 +843,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
         raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

         /*
-         * Clear the broadcast mask flag for the dead cpu, but do not
-         * stop the broadcast device!
+         * Clear the broadcast masks for the dead cpu, but do not stop
+         * the broadcast device!
          */
         cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
+        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
+        cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

         raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
@@ -792,6 +876,7 @@ bool tick_broadcast_oneshot_available(void)
 void __init tick_broadcast_init(void)
 {
         zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
+        zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
         zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_TICK_ONESHOT
         zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
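
tick_broadcast_on is allocated next to the existing masks. As background, cpumask_var_t is an embedded bitmap on most configurations and only becomes a real allocation with CONFIG_CPUMASK_OFFSTACK, which is why the zalloc/free helpers are used rather than a static struct cpumask. A generic usage sketch, assuming a hypothetical mask outside early boot (hence GFP_KERNEL instead of the GFP_NOWAIT used above):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static cpumask_var_t foo_mask;  /* hypothetical mask */

static int foo_mask_setup(void)
{
        /* zeroed on success; always succeeds when !CONFIG_CPUMASK_OFFSTACK */
        if (!zalloc_cpumask_var(&foo_mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_set_cpu(0, foo_mask);
        if (cpumask_test_cpu(0, foo_mask))
                cpumask_clear_cpu(0, foo_mask);

        free_cpumask_var(foo_mask);     /* pairs with zalloc_cpumask_var() */
        return 0;
}
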
