 arch/cris/arch-v32/drivers/sync_serial.c |   2 +-
 drivers/clocksource/timer-imx-gpt.c      |   1 +
 include/linux/tick.h                     |   7 +-
 include/linux/timekeeping.h              |   1 -
 kernel/time/clockevents.c                |  24 ++--
 kernel/time/tick-broadcast.c             | 163 +++++++++++++++++----------
 kernel/time/tick-common.c                |  21 ++++
 kernel/time/tick-sched.h                 |  10 ++
 8 files changed, 155 insertions(+), 74 deletions(-)
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 4dda9bd6b8fb..e989cee77414 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1464,7 +1464,7 @@ static inline void handle_rx_packet(struct sync_port *port)
 		if (port->write_ts_idx == NBR_IN_DESCR)
 			port->write_ts_idx = 0;
 		idx = port->write_ts_idx++;
-		do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
+		ktime_get_ts(&port->timestamp[idx]);
 		port->in_buffer_len += port->inbufchunk;
 	}
 	spin_unlock_irqrestore(&port->lock, flags);
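
Note: the removed do_posix_clock_monotonic_gettime() was a plain alias for ktime_get_ts() (see the timekeeping.h hunk below), so the conversion is purely mechanical. A minimal sketch of the pattern, outside this patch:

	struct timespec ts;

	ktime_get_ts(&ts);	/* was: do_posix_clock_monotonic_gettime(&ts); */
	pr_debug("monotonic: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);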
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 879c78423546..2d59038dec43 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -529,6 +529,7 @@ static void __init imx6dl_timer_init_dt(struct device_node *np)
 
 CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
 CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
+CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
 CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
 CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
 CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 3741ba1a652c..edbfc9a5293e 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -67,10 +67,13 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode);
 static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
 #endif /* BROADCAST */
 
-#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
 #else
-static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; }
+static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+	return 0;
+}
 #endif
 
 static inline void tick_broadcast_enable(void)
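
Callers normally reach tick_broadcast_oneshot_control() through the tick_broadcast_enter()/tick_broadcast_exit() wrappers that tick.h provides for the idle path. A hedged sketch of a consumer, assuming a hypothetical arch_deep_idle() helper; a -EBUSY return means this CPU drives the (possibly hrtimer based) broadcast and must stay in a shallow state:

	static void example_enter_idle(void)
	{
		if (tick_broadcast_enter()) {
			/* -EBUSY: this CPU may not go deep idle */
			cpu_relax();
			return;
		}
		arch_deep_idle();	/* hypothetical deep C-state entry */
		tick_broadcast_exit();
	}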
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 3aa72e648650..6e191e4e6ab6 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -145,7 +145,6 @@ static inline void getboottime(struct timespec *ts)
 }
 #endif
 
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
 #define ktime_get_real_ts64(ts)	getnstimeofday64(ts)
 
 /*
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 08ccc3da3ca0..50eb107f1198 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -120,19 +120,25 @@ static int __clockevents_switch_state(struct clock_event_device *dev,
 		/* The clockevent device is getting replaced. Shut it down. */
 
 	case CLOCK_EVT_STATE_SHUTDOWN:
-		return dev->set_state_shutdown(dev);
+		if (dev->set_state_shutdown)
+			return dev->set_state_shutdown(dev);
+		return 0;
 
 	case CLOCK_EVT_STATE_PERIODIC:
 		/* Core internal bug */
 		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
 			return -ENOSYS;
-		return dev->set_state_periodic(dev);
+		if (dev->set_state_periodic)
+			return dev->set_state_periodic(dev);
+		return 0;
 
 	case CLOCK_EVT_STATE_ONESHOT:
 		/* Core internal bug */
 		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
 			return -ENOSYS;
-		return dev->set_state_oneshot(dev);
+		if (dev->set_state_oneshot)
+			return dev->set_state_oneshot(dev);
+		return 0;
 
 	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
 		/* Core internal bug */
@@ -471,18 +477,6 @@ static int clockevents_sanity_check(struct clock_event_device *dev)
 	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
 		return 0;
 
-	/* New state-specific callbacks */
-	if (!dev->set_state_shutdown)
-		return -EINVAL;
-
-	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
-	    !dev->set_state_periodic)
-		return -EINVAL;
-
-	if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
-	    !dev->set_state_oneshot)
-		return -EINVAL;
-
 	return 0;
 }
 
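Taken together, the two clockevents.c hunks make the set_state_* callbacks optional: __clockevents_switch_state() treats a missing callback as a successful no-op, so the registration-time sanity check can go. A minimal sketch (all demo_* names invented) of a oneshot-only device that may now leave them NULL:

	static int demo_set_next_event(unsigned long delta,
				       struct clock_event_device *evt)
	{
		demo_hw_arm_comparator(delta);	/* hypothetical register poke */
		return 0;
	}

	static struct clock_event_device demo_clockevent = {
		.name		= "demo",
		.rating		= 200,
		.features	= CLOCK_EVT_FEAT_ONESHOT,
		.set_next_event	= demo_set_next_event,
		/* .set_state_shutdown and .set_state_oneshot left NULL:
		 * now treated as no-ops instead of failing the check. */
	};

Registration, e.g. clockevents_config_and_register(&demo_clockevent, rate_hz, 2, 0x7fffffff), no longer trips over the missing callbacks.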
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d39f32cdd1b5..52b9e199b5ac 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -159,7 +159,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 {
 	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 	unsigned long flags;
-	int ret;
+	int ret = 0;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -221,13 +221,14 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 			 * If we kept the cpu in the broadcast mask,
 			 * tell the caller to leave the per cpu device
 			 * in shutdown state. The periodic interrupt
-			 * is delivered by the broadcast device.
+			 * is delivered by the broadcast device, if
+			 * the broadcast device exists and is not
+			 * hrtimer based.
 			 */
-			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
 			break;
 		default:
-			/* Nothing to do */
-			ret = 0;
 			break;
 		}
 	}
@@ -265,8 +266,22 @@ static bool tick_do_broadcast(struct cpumask *mask)
 	 * Check, if the current cpu is in the mask
 	 */
 	if (cpumask_test_cpu(cpu, mask)) {
+		struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
 		cpumask_clear_cpu(cpu, mask);
-		local = true;
+		/*
+		 * We only run the local handler, if the broadcast
+		 * device is not hrtimer based. Otherwise we run into
+		 * a hrtimer recursion.
+		 *
+		 * local timer_interrupt()
+		 *   local_handler()
+		 *     expire_hrtimers()
+		 *       bc_handler()
+		 *         local_handler()
+		 *           expire_hrtimers()
+		 */
+		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
 	}
 
 	if (!cpumask_empty(mask)) {
@@ -301,6 +316,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 	bool bc_local;
 
 	raw_spin_lock(&tick_broadcast_lock);
+
+	/* Handle spurious interrupts gracefully */
+	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
+		raw_spin_unlock(&tick_broadcast_lock);
+		return;
+	}
+
 	bc_local = tick_do_periodic_broadcast();
 
 	if (clockevent_state_oneshot(dev)) {
@@ -359,8 +381,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
 	case TICK_BROADCAST_ON:
 		cpumask_set_cpu(cpu, tick_broadcast_on);
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
-			if (tick_broadcast_device.mode ==
-			    TICKDEV_MODE_PERIODIC)
+			/*
+			 * Only shutdown the cpu local device, if:
+			 *
+			 * - the broadcast device exists
+			 * - the broadcast device is not a hrtimer based one
+			 * - the broadcast device is in periodic mode to
+			 *   avoid a hickup during switch to oneshot mode
+			 */
+			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
+			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
 				clockevents_shutdown(dev);
 		}
 		break;
@@ -379,14 +409,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
 		break;
 	}
 
-	if (cpumask_empty(tick_broadcast_mask)) {
-		if (!bc_stopped)
-			clockevents_shutdown(bc);
-	} else if (bc_stopped) {
-		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-			tick_broadcast_start_periodic(bc);
-		else
-			tick_broadcast_setup_oneshot(bc);
+	if (bc) {
+		if (cpumask_empty(tick_broadcast_mask)) {
+			if (!bc_stopped)
+				clockevents_shutdown(bc);
+		} else if (bc_stopped) {
+			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+				tick_broadcast_start_periodic(bc);
+			else
+				tick_broadcast_setup_oneshot(bc);
+		}
 	}
 	raw_spin_unlock(&tick_broadcast_lock);
 }
@@ -662,71 +694,82 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
 	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
-/**
- * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
- * @state: The target state (enter/exit)
- *
- * The system enters/leaves a state, where affected devices might stop
- * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
- *
- * Called with interrupts disabled, so clockevents_lock is not
- * required here because the local clock event device cannot go away
- * under us.
- */
-int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 {
 	struct clock_event_device *bc, *dev;
-	struct tick_device *td;
 	int cpu, ret = 0;
 	ktime_t now;
 
 	/*
-	 * Periodic mode does not care about the enter/exit of power
-	 * states
+	 * If there is no broadcast device, tell the caller not to go
+	 * into deep idle.
 	 */
-	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-		return 0;
+	if (!tick_broadcast_device.evtdev)
+		return -EBUSY;
 
-	/*
-	 * We are called with preemtion disabled from the depth of the
-	 * idle code, so we can't be moved away.
-	 */
-	td = this_cpu_ptr(&tick_cpu_device);
-	dev = td->evtdev;
-
-	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
-		return 0;
+	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
 
 	raw_spin_lock(&tick_broadcast_lock);
 	bc = tick_broadcast_device.evtdev;
 	cpu = smp_processor_id();
 
 	if (state == TICK_BROADCAST_ENTER) {
+		/*
+		 * If the current CPU owns the hrtimer broadcast
+		 * mechanism, it cannot go deep idle and we do not add
+		 * the CPU to the broadcast mask. We don't have to go
+		 * through the EXIT path as the local timer is not
+		 * shutdown.
+		 */
+		ret = broadcast_needs_cpu(bc, cpu);
+		if (ret)
+			goto out;
+
+		/*
+		 * If the broadcast device is in periodic mode, we
+		 * return.
+		 */
+		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
+			/* If it is a hrtimer based broadcast, return busy */
+			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
+				ret = -EBUSY;
+			goto out;
+		}
+
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
+
+			/* Conditionally shut down the local timer. */
 			broadcast_shutdown_local(bc, dev);
+
 			/*
 			 * We only reprogram the broadcast timer if we
 			 * did not mark ourself in the force mask and
 			 * if the cpu local event is earlier than the
 			 * broadcast event. If the current CPU is in
 			 * the force mask, then we are going to be
-			 * woken by the IPI right away.
+			 * woken by the IPI right away; we return
+			 * busy, so the CPU does not try to go deep
+			 * idle.
 			 */
-			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
-			    dev->next_event.tv64 < bc->next_event.tv64)
+			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
+				ret = -EBUSY;
+			} else if (dev->next_event.tv64 < bc->next_event.tv64) {
 				tick_broadcast_set_event(bc, cpu, dev->next_event);
+				/*
+				 * In case of hrtimer broadcasts the
+				 * programming might have moved the
+				 * timer to this cpu. If yes, remove
+				 * us from the broadcast mask and
+				 * return busy.
+				 */
+				ret = broadcast_needs_cpu(bc, cpu);
+				if (ret) {
+					cpumask_clear_cpu(cpu,
+						tick_broadcast_oneshot_mask);
+				}
+			}
 		}
-		/*
-		 * If the current CPU owns the hrtimer broadcast
-		 * mechanism, it cannot go deep idle and we remove the
-		 * CPU from the broadcast mask. We don't have to go
-		 * through the EXIT path as the local timer is not
-		 * shutdown.
-		 */
-		ret = broadcast_needs_cpu(bc, cpu);
-		if (ret)
-			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
@@ -938,6 +981,16 @@ bool tick_broadcast_oneshot_available(void)
 	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
 }
 
+#else
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
+		return -EBUSY;
+
+	return 0;
+}
 #endif
 
 void __init tick_broadcast_init(void)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 76446cb5dfe1..55e13efff1ab 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -343,6 +343,27 @@ out_bc:
 	tick_install_broadcast_device(newdev);
 }
 
+/**
+ * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
+ * @state: The target state (enter/exit)
+ *
+ * The system enters/leaves a state, where affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
+ *
+ * Called with interrupts disabled, so clockevents_lock is not
+ * required here because the local clock event device cannot go away
+ * under us.
+ */
+int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
+	if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
+		return 0;
+
+	return __tick_broadcast_oneshot_control(state);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * Transfer the do_timer job away from a dying cpu.
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 42fdf4958bcc..a4a8d4e9baa1 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -71,4 +71,14 @@ extern void tick_cancel_sched_timer(int cpu);
 static inline void tick_cancel_sched_timer(int cpu) { }
 #endif
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state);
+#else
+static inline int
+__tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+	return -EBUSY;
+}
+#endif
+
 #endif