Diffstat (limited to 'kernel/time/tick-broadcast.c')

 kernel/time/tick-broadcast.c | 179 ++++++++++++++++++++++---------------------
 1 file changed, 96 insertions(+), 83 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 066f0ec05e48..7e8ca4f448a8 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -33,12 +33,14 @@ static cpumask_var_t tick_broadcast_mask;
 static cpumask_var_t tick_broadcast_on;
 static cpumask_var_t tmpmask;
 static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
-static int tick_broadcast_force;
+static int tick_broadcast_forced;
 
 #ifdef CONFIG_TICK_ONESHOT
 static void tick_broadcast_clear_oneshot(int cpu);
+static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 #else
 static inline void tick_broadcast_clear_oneshot(int cpu) { }
+static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
 #endif
 
 /*
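For reference, the new control interfaces below take explicit enums instead of the old CLOCK_EVT_NOTIFY_* reason codes. A minimal sketch of those enums, assuming they mirror the definitions this series adds to include/linux/tick.h:

/* Sketch of the enums used by the new control interfaces (assumed to
 * match include/linux/tick.h from this series). */
enum tick_broadcast_mode {
        TICK_BROADCAST_OFF,
        TICK_BROADCAST_ON,
        TICK_BROADCAST_FORCE,
};

enum tick_broadcast_state {
        TICK_BROADCAST_EXIT,
        TICK_BROADCAST_ENTER,
};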
@@ -303,7 +305,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
         /*
          * The device is in periodic mode. No reprogramming necessary:
          */
-        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
+        if (dev->state == CLOCK_EVT_STATE_PERIODIC)
                 goto unlock;
 
         /*
@@ -324,49 +326,54 @@ unlock:
         raw_spin_unlock(&tick_broadcast_lock);
 }
 
-/*
- * Powerstate information: The system enters/leaves a state, where
- * affected devices might stop
+/**
+ * tick_broadcast_control - Enable/disable or force broadcast mode
+ * @mode:	The selected broadcast mode
+ *
+ * Called when the system enters a state where affected tick devices
+ * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
+ *
+ * Called with interrupts disabled, so clockevents_lock is not
+ * required here because the local clock event device cannot go away
+ * under us.
  */
-static void tick_do_broadcast_on_off(unsigned long *reason)
+void tick_broadcast_control(enum tick_broadcast_mode mode)
 {
         struct clock_event_device *bc, *dev;
         struct tick_device *td;
-        unsigned long flags;
         int cpu, bc_stopped;
 
-        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-
-        cpu = smp_processor_id();
-        td = &per_cpu(tick_cpu_device, cpu);
+        td = this_cpu_ptr(&tick_cpu_device);
         dev = td->evtdev;
-        bc = tick_broadcast_device.evtdev;
 
         /*
          * Is the device not affected by the powerstate ?
          */
         if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
-                goto out;
+                return;
 
         if (!tick_device_is_functional(dev))
-                goto out;
+                return;
 
+        raw_spin_lock(&tick_broadcast_lock);
+        cpu = smp_processor_id();
+        bc = tick_broadcast_device.evtdev;
         bc_stopped = cpumask_empty(tick_broadcast_mask);
 
-        switch (*reason) {
-        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
-        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
+        switch (mode) {
+        case TICK_BROADCAST_FORCE:
+                tick_broadcast_forced = 1;
+        case TICK_BROADCAST_ON:
                 cpumask_set_cpu(cpu, tick_broadcast_on);
                 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
                         if (tick_broadcast_device.mode ==
                             TICKDEV_MODE_PERIODIC)
                                 clockevents_shutdown(dev);
                 }
-                if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
-                        tick_broadcast_force = 1;
                 break;
-        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-                if (tick_broadcast_force)
+
+        case TICK_BROADCAST_OFF:
+                if (tick_broadcast_forced)
                         break;
                 cpumask_clear_cpu(cpu, tick_broadcast_on);
                 if (!tick_device_is_functional(dev))
@@ -388,22 +395,9 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
                 else
                         tick_broadcast_setup_oneshot(bc);
         }
-out:
-        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
-}
-
-/*
- * Powerstate information: The system enters/leaves a state, where
- * affected devices might stop.
- */
-void tick_broadcast_on_off(unsigned long reason, int *oncpu)
-{
-        if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
-                printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
-                       "offline CPU #%d\n", *oncpu);
-        else
-                tick_do_broadcast_on_off(&reason);
-}
+        raw_spin_unlock(&tick_broadcast_lock);
+}
+EXPORT_SYMBOL_GPL(tick_broadcast_control);
 
 /*
  * Set the periodic handler depending on broadcast on/off
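With tick_broadcast_on_off() gone, callers are expected to reach tick_broadcast_control() through small inline helpers. A minimal sketch, assuming wrappers along the lines of tick_broadcast_enable()/tick_broadcast_disable() in include/linux/tick.h; TICK_BROADCAST_FORCE is passed directly by the few callers that need it:

/* Sketch of the assumed inline wrappers around tick_broadcast_control(). */
static inline void tick_broadcast_enable(void)
{
        tick_broadcast_control(TICK_BROADCAST_ON);
}

static inline void tick_broadcast_disable(void)
{
        tick_broadcast_control(TICK_BROADCAST_OFF);
}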
@@ -416,14 +410,14 @@ void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
                 dev->event_handler = tick_handle_periodic_broadcast;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 /*
  * Remove a CPU from broadcasting
  */
-void tick_shutdown_broadcast(unsigned int *cpup)
+void tick_shutdown_broadcast(unsigned int cpu)
 {
         struct clock_event_device *bc;
         unsigned long flags;
-        unsigned int cpu = *cpup;
 
         raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -438,6 +432,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
         raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
+#endif
 
 void tick_suspend_broadcast(void)
 {
@@ -453,38 +448,48 @@ void tick_suspend_broadcast(void)
         raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
-int tick_resume_broadcast(void)
+/*
+ * This is called from tick_resume_local() on a resuming CPU. That's
+ * called from the core resume function, tick_unfreeze() and the magic XEN
+ * resume hackery.
+ *
+ * In none of these cases the broadcast device mode can change and the
+ * bit of the resuming CPU in the broadcast mask is safe as well.
+ */
+bool tick_resume_check_broadcast(void)
+{
+        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
+                return false;
+        else
+                return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
+}
+
+void tick_resume_broadcast(void)
 {
         struct clock_event_device *bc;
         unsigned long flags;
-        int broadcast = 0;
 
         raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
         bc = tick_broadcast_device.evtdev;
 
         if (bc) {
-                clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);
+                clockevents_tick_resume(bc);
 
                 switch (tick_broadcast_device.mode) {
                 case TICKDEV_MODE_PERIODIC:
                         if (!cpumask_empty(tick_broadcast_mask))
                                 tick_broadcast_start_periodic(bc);
-                        broadcast = cpumask_test_cpu(smp_processor_id(),
                                                      tick_broadcast_mask);
                         break;
                 case TICKDEV_MODE_ONESHOT:
                         if (!cpumask_empty(tick_broadcast_mask))
-                                broadcast = tick_resume_broadcast_oneshot(bc);
+                                tick_resume_broadcast_oneshot(bc);
                         break;
                 }
         }
         raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
-
-        return broadcast;
 }
 
-
 #ifdef CONFIG_TICK_ONESHOT
 
 static cpumask_var_t tick_broadcast_oneshot_mask;
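tick_resume_broadcast() no longer returns whether the resuming CPU is broadcast-bound; the resume path queries tick_resume_check_broadcast() on its own. A sketch of the consuming side, modeled on the tick_resume_local() flow this series introduces (illustrative, not verbatim):

/* Sketch: a resuming CPU resumes its local device, then skips the
 * periodic/oneshot reprogramming if broadcast still covers it. */
void tick_resume_local(void)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        bool broadcast = tick_resume_check_broadcast();

        clockevents_tick_resume(td->evtdev);
        if (!broadcast) {
                if (td->mode == TICKDEV_MODE_PERIODIC)
                        tick_setup_periodic(td->evtdev, 0);
                else
                        tick_resume_oneshot();
        }
}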
@@ -532,8 +537,8 @@ static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
 {
         int ret;
 
-        if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
-                clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+        if (bc->state != CLOCK_EVT_STATE_ONESHOT)
+                clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
 
         ret = clockevents_program_event(bc, expires, force);
         if (!ret)
@@ -541,10 +546,9 @@ static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
         return ret;
 }
 
-int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
+static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
-        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
-        return 0;
+        clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
 }
 
 /*
@@ -562,8 +566,8 @@ void tick_check_oneshot_broadcast_this_cpu(void)
                  * switched over, leave the device alone.
                  */
                 if (td->mode == TICKDEV_MODE_ONESHOT) {
-                        clockevents_set_mode(td->evtdev,
                                              CLOCK_EVT_MODE_ONESHOT);
+                        clockevents_set_state(td->evtdev,
                                               CLOCK_EVT_STATE_ONESHOT);
                 }
         }
 }
@@ -666,31 +670,26 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
                 if (dev->next_event.tv64 < bc->next_event.tv64)
                         return;
         }
-        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+        clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
-static void broadcast_move_bc(int deadcpu)
-{
-        struct clock_event_device *bc = tick_broadcast_device.evtdev;
-
-        if (!bc || !broadcast_needs_cpu(bc, deadcpu))
-                return;
-        /* This moves the broadcast assignment to this cpu */
-        clockevents_program_event(bc, bc->next_event, 1);
-}
-
-/*
- * Powerstate information: The system enters/leaves a state, where
- * affected devices might stop
+/**
+ * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
+ * @state:	The target state (enter/exit)
+ *
+ * The system enters/leaves a state, where affected devices might stop
  * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
+ *
+ * Called with interrupts disabled, so clockevents_lock is not
+ * required here because the local clock event device cannot go away
+ * under us.
  */
-int tick_broadcast_oneshot_control(unsigned long reason)
+int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 {
         struct clock_event_device *bc, *dev;
         struct tick_device *td;
-        unsigned long flags;
-        ktime_t now;
         int cpu, ret = 0;
+        ktime_t now;
 
         /*
          * Periodic mode does not care about the enter/exit of power
@@ -703,17 +702,17 @@ int tick_broadcast_oneshot_control(unsigned long reason)
          * We are called with preemtion disabled from the depth of the
          * idle code, so we can't be moved away.
          */
-        cpu = smp_processor_id();
-        td = &per_cpu(tick_cpu_device, cpu);
+        td = this_cpu_ptr(&tick_cpu_device);
         dev = td->evtdev;
 
         if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                 return 0;
 
+        raw_spin_lock(&tick_broadcast_lock);
         bc = tick_broadcast_device.evtdev;
+        cpu = smp_processor_id();
 
-        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
+        if (state == TICK_BROADCAST_ENTER) {
                 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
                         WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
                         broadcast_shutdown_local(bc, dev);
@@ -741,7 +740,7 @@ int tick_broadcast_oneshot_control(unsigned long reason)
                         cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
         } else {
                 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
-                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+                        clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
                         /*
                          * The cpu which was handling the broadcast
                          * timer marked this cpu in the broadcast
@@ -805,9 +804,10 @@ int tick_broadcast_oneshot_control(unsigned long reason)
                 }
         }
 out:
-        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+        raw_spin_unlock(&tick_broadcast_lock);
         return ret;
 }
+EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
 
 /*
  * Reset the one shot broadcast for a cpu
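The idle entry/exit paths call this function with an explicit state. A minimal sketch, assuming inline wrappers like tick_broadcast_enter()/tick_broadcast_exit() in include/linux/tick.h:

/* Sketch of the assumed idle-path wrappers. Enter can fail with -EBUSY
 * when this CPU is needed to broadcast wakeups. */
static inline int tick_broadcast_enter(void)
{
        return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}

static inline void tick_broadcast_exit(void)
{
        tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}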
@@ -842,7 +842,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 
         /* Set it up only once ! */
         if (bc->event_handler != tick_handle_oneshot_broadcast) {
-                int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
+                int was_periodic = bc->state == CLOCK_EVT_STATE_PERIODIC;
 
                 bc->event_handler = tick_handle_oneshot_broadcast;
 
@@ -858,7 +858,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
                            tick_broadcast_oneshot_mask, tmpmask);
 
                 if (was_periodic && !cpumask_empty(tmpmask)) {
-                        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+                        clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
                         tick_broadcast_init_next_event(tmpmask,
                                                        tick_next_period);
                         tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
@@ -894,14 +894,28 @@ void tick_broadcast_switch_to_oneshot(void)
         raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void hotplug_cpu__broadcast_tick_pull(int deadcpu)
+{
+        struct clock_event_device *bc;
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+        bc = tick_broadcast_device.evtdev;
+
+        if (bc && broadcast_needs_cpu(bc, deadcpu)) {
+                /* This moves the broadcast assignment to this CPU: */
+                clockevents_program_event(bc, bc->next_event, 1);
+        }
+        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+}
 
 /*
  * Remove a dead CPU from broadcasting
  */
-void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
+void tick_shutdown_broadcast_oneshot(unsigned int cpu)
 {
         unsigned long flags;
-        unsigned int cpu = *cpup;
 
         raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
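The broadcast pull is now a dedicated hotplug hook rather than a side effect of tick_shutdown_broadcast_oneshot(). A rough sketch of the intended ordering on the CPU-down path; the wrapper below is hypothetical, and only hotplug_cpu__broadcast_tick_pull() and __cpu_die() come from the kernel:

/* Hypothetical wrapper: pull the broadcast duty over to a surviving
 * CPU before the hotplug core actually kills the target CPU. */
static void hypothetical_kill_cpu(unsigned int cpu)
{
        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);
}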
@@ -913,10 +927,9 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
         cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
         cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
 
-        broadcast_move_bc(cpu);
-
         raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
+#endif
 
 /*
  * Check, whether the broadcast device is in one shot mode