Diffstat (limited to 'kernel/time/tick-common.c')
 kernel/time/tick-common.c | 197 ++++++++------------
 1 file changed, 80 insertions(+), 117 deletions(-)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 5d3fb100bc06..64522ecdfe0e 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+#include <linux/module.h>
 
 #include <asm/irq_regs.h>
 
@@ -33,7 +34,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
 ktime_t tick_next_period;
 ktime_t tick_period;
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
-static DEFINE_RAW_SPINLOCK(tick_device_lock);
 
 /*
  * Debugging: see timer_list.c
@@ -194,7 +194,8 @@ static void tick_setup_device(struct tick_device *td,
 	 * When global broadcasting is active, check if the current
 	 * device is registered as a placeholder for broadcast mode.
 	 * This allows us to handle this x86 misfeature in a generic
-	 * way.
+	 * way. This function also returns !=0 when we keep the
+	 * current active broadcast state for this CPU.
 	 */
 	if (tick_device_uses_broadcast(newdev, cpu))
 		return;
@@ -205,17 +206,75 @@ static void tick_setup_device(struct tick_device *td,
 		tick_setup_oneshot(newdev, handler, next_event);
 }
 
+void tick_install_replacement(struct clock_event_device *newdev)
+{
+	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	int cpu = smp_processor_id();
+
+	clockevents_exchange_device(td->evtdev, newdev);
+	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
+	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
+		tick_oneshot_notify();
+}
+
+static bool tick_check_percpu(struct clock_event_device *curdev,
+			      struct clock_event_device *newdev, int cpu)
+{
+	if (!cpumask_test_cpu(cpu, newdev->cpumask))
+		return false;
+	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
+		return true;
+	/* Check if irq affinity can be set */
+	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
+		return false;
+	/* Prefer an existing cpu local device */
+	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
+		return false;
+	return true;
+}
+
+static bool tick_check_preferred(struct clock_event_device *curdev,
+				 struct clock_event_device *newdev)
+{
+	/* Prefer oneshot capable device */
+	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
+		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
+			return false;
+		if (tick_oneshot_mode_active())
+			return false;
+	}
+
+	/*
+	 * Use the higher rated one, but prefer a CPU local device with a lower
+	 * rating than a non-CPU local device
+	 */
+	return !curdev ||
+		newdev->rating > curdev->rating ||
+		!cpumask_equal(curdev->cpumask, newdev->cpumask);
+}
+
+/*
+ * Check whether the new device is a better fit than curdev. curdev
+ * can be NULL !
+ */
+bool tick_check_replacement(struct clock_event_device *curdev,
+			    struct clock_event_device *newdev)
+{
+	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
+		return false;
+
+	return tick_check_preferred(curdev, newdev);
+}
+
 /*
- * Check, if the new registered device should be used.
+ * Check, if the new registered device should be used. Called with
+ * clockevents_lock held and interrupts disabled.
  */
-static int tick_check_new_device(struct clock_event_device *newdev)
+void tick_check_new_device(struct clock_event_device *newdev)
 {
 	struct clock_event_device *curdev;
 	struct tick_device *td;
-	int cpu, ret = NOTIFY_OK;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
+	int cpu;
 
 	cpu = smp_processor_id();
 	if (!cpumask_test_cpu(cpu, newdev->cpumask))
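The three new helpers factor the old open-coded selection logic into reusable predicates: tick_check_percpu() decides whether newdev can serve a given CPU at all, tick_check_preferred() ranks it against the incumbent device, and tick_check_replacement() combines the two for callers outside this file. As a rough illustration of which clock_event_device fields feed these checks, here is a minimal sketch of a hypothetical per-CPU timer driver; all mydrv_* names and numeric values are invented for the example, not taken from this patch:

/*
 * Illustrative only: a hypothetical per-CPU timer driver, showing which
 * clock_event_device fields the helpers above inspect.
 */
#include <linux/clockchips.h>
#include <linux/module.h>
#include <linux/percpu.h>

#define MYDRV_TIMER_FREQ	1000000	/* assumed 1 MHz timer input */

static int mydrv_set_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	/* program the hardware comparator; omitted in this sketch */
	return 0;
}

static void mydrv_set_mode(enum clock_event_mode mode,
			   struct clock_event_device *evt)
{
	/* switch between periodic/oneshot/shutdown; omitted here */
}

static DEFINE_PER_CPU(struct clock_event_device, mydrv_clockevent) = {
	.name		= "mydrv-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT, /* weighed in tick_check_preferred() */
	.rating		= 350,			/* compared against curdev->rating */
	.irq		= -1,			/* < 0: irq affinity check is skipped */
	.owner		= THIS_MODULE,		/* pinned via try_module_get() on install */
	.set_next_event	= mydrv_set_next_event,
	.set_mode	= mydrv_set_mode,
};

static void mydrv_setup_cpu(int cpu)
{
	struct clock_event_device *evt = &per_cpu(mydrv_clockevent, cpu);

	/* cpumask == cpumask_of(cpu): fast-path "true" in tick_check_percpu() */
	evt->cpumask = cpumask_of(cpu);

	/* Registration reaches tick_check_new_device() via the clockevents core */
	clockevents_config_and_register(evt, MYDRV_TIMER_FREQ, 0xf, 0x7fffffff);
}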
@@ -225,40 +284,15 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	curdev = td->evtdev;
 
 	/* cpu local device ? */
-	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {
-
-		/*
-		 * If the cpu affinity of the device interrupt can not
-		 * be set, ignore it.
-		 */
-		if (!irq_can_set_affinity(newdev->irq))
-			goto out_bc;
+	if (!tick_check_percpu(curdev, newdev, cpu))
+		goto out_bc;
 
-		/*
-		 * If we have a cpu local device already, do not replace it
-		 * by a non cpu local device
-		 */
-		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
-			goto out_bc;
-	}
+	/* Preference decision */
+	if (!tick_check_preferred(curdev, newdev))
+		goto out_bc;
 
-	/*
-	 * If we have an active device, then check the rating and the oneshot
-	 * feature.
-	 */
-	if (curdev) {
-		/*
-		 * Prefer one shot capable devices !
-		 */
-		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
-		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
-			goto out_bc;
-		/*
-		 * Check the rating
-		 */
-		if (curdev->rating >= newdev->rating)
-			goto out_bc;
-	}
+	if (!try_module_get(newdev->owner))
+		return;
 
 	/*
 	 * Replace the eventually existing device by the new
@@ -273,20 +307,13 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
 	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
 		tick_oneshot_notify();
-
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
-	return NOTIFY_STOP;
+	return;
 
 out_bc:
 	/*
 	 * Can the new device be used as a broadcast device ?
 	 */
-	if (tick_check_broadcast_device(newdev))
-		ret = NOTIFY_STOP;
-
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
-
-	return ret;
+	tick_install_broadcast_device(newdev);
 }
 
 /*
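Two changes in tick_check_new_device() deserve a note. First, the NOTIFY_OK/NOTIFY_STOP return codes are gone because the function is no longer a notifier callback but a direct call from the clockevents core (see the removed tick_notify() below). Second, the tick layer now pins the supplying module with try_module_get() before installing a device, so a clockevent device provided by a module cannot disappear while it is in use. The matching module_put() lives in clockevents_exchange_device(); sketched below from the companion clockevents change in the same series (kernel/time/clockevents.c, not part of this diff):

/* kernel/time/clockevents.c, companion change (sketch, not in this diff) */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * The released device goes back on the released list; drop the
	 * module reference that tick_check_new_device() took above.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_move(&old->list, &clockevents_released);
	}
	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}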
@@ -294,7 +321,7 @@ out_bc:
  *
  * Called with interrupts disabled.
  */
-static void tick_handover_do_timer(int *cpup)
+void tick_handover_do_timer(int *cpup)
 {
 	if (*cpup == tick_do_timer_cpu) {
 		int cpu = cpumask_first(cpu_online_mask);
@@ -311,13 +338,11 @@ static void tick_handover_do_timer(int *cpup)
  * access the hardware device itself.
  * We just set the mode and remove it from the lists.
  */
-static void tick_shutdown(unsigned int *cpup)
+void tick_shutdown(unsigned int *cpup)
 {
 	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
 	struct clock_event_device *dev = td->evtdev;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 	td->mode = TICKDEV_MODE_PERIODIC;
 	if (dev) {
 		/*
@@ -329,26 +354,20 @@ static void tick_shutdown(unsigned int *cpup)
 		dev->event_handler = clockevents_handle_noop;
 		td->evtdev = NULL;
 	}
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_suspend(void)
+void tick_suspend(void)
 {
 	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 	clockevents_shutdown(td->evtdev);
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_resume(void)
+void tick_resume(void)
 {
 	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-	unsigned long flags;
 	int broadcast = tick_resume_broadcast();
 
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
 
 	if (!broadcast) {
@@ -357,68 +376,12 @@ static void tick_resume(void)
 		else
 			tick_resume_oneshot();
 	}
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-/*
- * Notification about clock event devices
- */
-static int tick_notify(struct notifier_block *nb, unsigned long reason,
-		       void *dev)
-{
-	switch (reason) {
-
-	case CLOCK_EVT_NOTIFY_ADD:
-		return tick_check_new_device(dev);
-
-	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
-	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
-		tick_broadcast_on_off(reason, dev);
-		break;
-
-	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
-	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
-		tick_broadcast_oneshot_control(reason);
-		break;
-
-	case CLOCK_EVT_NOTIFY_CPU_DYING:
-		tick_handover_do_timer(dev);
-		break;
-
-	case CLOCK_EVT_NOTIFY_CPU_DEAD:
-		tick_shutdown_broadcast_oneshot(dev);
-		tick_shutdown_broadcast(dev);
-		tick_shutdown(dev);
-		break;
-
-	case CLOCK_EVT_NOTIFY_SUSPEND:
-		tick_suspend();
-		tick_suspend_broadcast();
-		break;
-
-	case CLOCK_EVT_NOTIFY_RESUME:
-		tick_resume();
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block tick_notifier = {
-	.notifier_call = tick_notify,
-};
-
 /**
  * tick_init - initialize the tick control
- *
- * Register the notifier with the clockevents framework
  */
 void __init tick_init(void)
 {
-	clockevents_register_notifier(&tick_notifier);
 	tick_broadcast_init();
 }
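With tick_notify() and the tick_notifier registration removed, every former CLOCK_EVT_NOTIFY_* case becomes a plain function call from the clockevents core, serialized by clockevents_lock with interrupts disabled; that serialization is also what makes the deleted tick_device_lock redundant. Roughly what the former CLOCK_EVT_NOTIFY_ADD path looks like after the conversion, sketched from the companion change in kernel/time/clockevents.c (not part of this diff):

/* kernel/time/clockevents.c, companion change (sketch, not in this diff) */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);	/* was: notifier call with CLOCK_EVT_NOTIFY_ADD */
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}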