Diffstat (limited to 'kernel/time')
 kernel/time/Kconfig                  |   2
 kernel/time/Makefile                 |   5
 kernel/time/clockevents.c            |  40
 kernel/time/ntp.c                    |   5
 kernel/time/tick-broadcast-hrtimer.c | 106
 kernel/time/tick-broadcast.c         |  85
 kernel/time/tick-common.c            |  16
 kernel/time/tick-internal.h          |  11
 kernel/time/timekeeping.c            |   8
 kernel/time/timekeeping_debug.c      |   2
 10 files changed, 241 insertions(+), 39 deletions(-)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 3ce6e8c5f3fc..f448513a45ed 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -124,7 +124,7 @@ config NO_HZ_FULL
 endchoice
 
 config NO_HZ_FULL_ALL
-	bool "Full dynticks system on all CPUs by default"
+	bool "Full dynticks system on all CPUs by default (except CPU 0)"
 	depends on NO_HZ_FULL
 	help
 	  If the user doesn't pass the nohz_full boot option to
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 9250130646f5..57a413fd0ebf 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -3,7 +3,10 @@ obj-y += timeconv.o posix-clock.o alarmtimer.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)		+= clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS)		+= tick-common.o
-obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)	+= tick-broadcast.o
+ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
+ obj-y						+= tick-broadcast.o
+ obj-$(CONFIG_TICK_ONESHOT)			+= tick-broadcast-hrtimer.o
+endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK)		+= sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT)			+= tick-oneshot.o
 obj-$(CONFIG_TICK_ONESHOT)			+= tick-sched.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 086ad6043bcb..ad362c260ef4 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -439,6 +439,19 @@ void clockevents_config_and_register(struct clock_event_device *dev,
 }
 EXPORT_SYMBOL_GPL(clockevents_config_and_register);
 
+int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
+{
+	clockevents_config(dev, freq);
+
+	if (dev->mode == CLOCK_EVT_MODE_ONESHOT)
+		return clockevents_program_event(dev, dev->next_event, false);
+
+	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
+		dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev);
+
+	return 0;
+}
+
 /**
  * clockevents_update_freq - Update frequency and reprogram a clock event device.
  * @dev:	device to modify
@@ -446,17 +459,22 @@ EXPORT_SYMBOL_GPL(clockevents_config_and_register);
  *
  * Reconfigure and reprogram a clock event device in oneshot
  * mode. Must be called on the cpu for which the device delivers per
- * cpu timer events with interrupts disabled! Returns 0 on success,
- * -ETIME when the event is in the past.
+ * cpu timer events. If called for the broadcast device the core takes
+ * care of serialization.
+ *
+ * Returns 0 on success, -ETIME when the event is in the past.
  */
 int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 {
-	clockevents_config(dev, freq);
-
-	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
-		return 0;
+	unsigned long flags;
+	int ret;
 
-	return clockevents_program_event(dev, dev->next_event, false);
+	local_irq_save(flags);
+	ret = tick_broadcast_update_freq(dev, freq);
+	if (ret == -ENODEV)
+		ret = __clockevents_update_freq(dev, freq);
+	local_irq_restore(flags);
+	return ret;
 }
 
 /*
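
The split into __clockevents_update_freq() (lock-free; the caller provides serialization) and clockevents_update_freq() (irq-safe wrapper that first offers the device to the broadcast code) lets a timer driver whose input clock scales with cpufreq reprogram its pending event from a clock rate notifier. A minimal sketch of such a call site; the notifier and the timer_evt device are made-up names, only clockevents_update_freq() is the API shown above:

	/*
	 * Sketch: reacting to a clock rate change. timer_evt and
	 * timer_clk_notifier are hypothetical names for illustration.
	 */
	#include <linux/clk.h>
	#include <linux/clockchips.h>

	static struct clock_event_device timer_evt;

	static int timer_clk_notifier(struct notifier_block *nb,
				      unsigned long event, void *data)
	{
		struct clk_notifier_data *ndata = data;

		if (event != POST_RATE_CHANGE)
			return NOTIFY_DONE;

		/* Recomputes mult/shift and reprograms the pending event. */
		clockevents_update_freq(&timer_evt, ndata->new_rate);
		return NOTIFY_OK;
	}
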
@@ -524,12 +542,13 @@ void clockevents_resume(void)
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 /**
  * clockevents_notify - notification about relevant events
+ * Returns 0 on success, any other value on error
  */
-void clockevents_notify(unsigned long reason, void *arg)
+int clockevents_notify(unsigned long reason, void *arg)
 {
 	struct clock_event_device *dev, *tmp;
 	unsigned long flags;
-	int cpu;
+	int cpu, ret = 0;
 
 	raw_spin_lock_irqsave(&clockevents_lock, flags);
 
@@ -542,7 +561,7 @@ void clockevents_notify(unsigned long reason, void *arg)
 
 	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
 	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
-		tick_broadcast_oneshot_control(reason);
+		ret = tick_broadcast_oneshot_control(reason);
 		break;
 
 	case CLOCK_EVT_NOTIFY_CPU_DYING:
@@ -585,6 +604,7 @@ void clockevents_notify(unsigned long reason, void *arg)
 		break;
 	}
 	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 
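With clockevents_notify() now returning an int, an idle driver can learn that this CPU currently owns the hrtimer broadcast and must not enter a timer-stopping C-state. A rough sketch of the intended call pattern; the two idle helpers are hypothetical placeholders, not part of this patch:

	/*
	 * Sketch: BROADCAST_ENTER may now fail with -EBUSY when this
	 * CPU is the one keeping the broadcast hrtimer alive.
	 */
	static void example_idle_enter(void)	/* hypothetical */
	{
		int cpu = smp_processor_id();

		if (clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu)) {
			/* -EBUSY: this CPU must keep its tick to wake the others. */
			enter_shallow_idle();		/* hypothetical */
		} else {
			enter_timer_stopping_idle();	/* hypothetical */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		}
	}
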
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index af8d1d4f3d55..419a52cecd20 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -514,12 +514,13 @@ static void sync_cmos_clock(struct work_struct *work)
 		next.tv_sec++;
 		next.tv_nsec -= NSEC_PER_SEC;
 	}
-	schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
+	queue_delayed_work(system_power_efficient_wq,
+			   &sync_cmos_work, timespec_to_jiffies(&next));
 }
 
 void ntp_notify_cmos_timer(void)
 {
-	schedule_delayed_work(&sync_cmos_work, 0);
+	queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
 }
 
 #else
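
system_power_efficient_wq behaves like the system workqueue, except that with CONFIG_WQ_POWER_EFFICIENT (or workqueue.power_efficient on the command line) it is unbound, so the CMOS sync can run on whichever CPU is already awake instead of waking an idle one. The same substitution works for any delayed work without CPU affinity; a sketch with hypothetical names:

	/* Sketch: periodic housekeeping with no CPU affinity. */
	#include <linux/workqueue.h>

	static void my_sync_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(my_sync_work, my_sync_fn);

	static void my_sync_fn(struct work_struct *work)
	{
		/* ... do the work, then re-arm one second out ... */
		queue_delayed_work(system_power_efficient_wq, &my_sync_work, HZ);
	}
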
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
new file mode 100644
index 000000000000..eb682d5c697c
--- /dev/null
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -0,0 +1,106 @@
+/*
+ * linux/kernel/time/tick-broadcast-hrtimer.c
+ * This file emulates a local clock event device
+ * via a pseudo clock device.
+ */
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/profile.h>
+#include <linux/clockchips.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+
+#include "tick-internal.h"
+
+static struct hrtimer bctimer;
+
+static void bc_set_mode(enum clock_event_mode mode,
+			struct clock_event_device *bc)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		/*
+		 * Note, we cannot cancel the timer here as we might
+		 * run into the following live lock scenario:
+		 *
+		 * cpu 0		cpu 1
+		 * lock(broadcast_lock);
+		 *			hrtimer_interrupt()
+		 *			bc_handler()
+		 *			   tick_handle_oneshot_broadcast();
+		 *			    lock(broadcast_lock);
+		 * hrtimer_cancel()
+		 *  wait_for_callback()
+		 */
+		hrtimer_try_to_cancel(&bctimer);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * This is called from the guts of the broadcast code when the cpu
+ * which is about to enter idle has the earliest broadcast timer event.
+ */
+static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
+{
+	/*
+	 * We try to cancel the timer first. If the callback is in
+	 * flight on some other cpu then we let it handle it. If we
+	 * were able to cancel the timer nothing can rearm it as we
+	 * own broadcast_lock.
+	 *
+	 * However we can also be called from the event handler of
+	 * ce_broadcast_hrtimer itself when it expires. We cannot
+	 * restart the timer because we are in the callback, but we
+	 * can set the expiry time and let the callback return
+	 * HRTIMER_RESTART.
+	 */
+	if (hrtimer_try_to_cancel(&bctimer) >= 0) {
+		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+		/* Bind the "device" to the cpu */
+		bc->bound_on = smp_processor_id();
+	} else if (bc->bound_on == smp_processor_id()) {
+		hrtimer_set_expires(&bctimer, expires);
+	}
+	return 0;
+}
+
+static struct clock_event_device ce_broadcast_hrtimer = {
+	.set_mode		= bc_set_mode,
+	.set_next_ktime		= bc_set_next,
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_KTIME |
+				  CLOCK_EVT_FEAT_HRTIMER,
+	.rating			= 0,
+	.bound_on		= -1,
+	.min_delta_ns		= 1,
+	.max_delta_ns		= KTIME_MAX,
+	.min_delta_ticks	= 1,
+	.max_delta_ticks	= ULONG_MAX,
+	.mult			= 1,
+	.shift			= 0,
+	.cpumask		= cpu_all_mask,
+};
+
+static enum hrtimer_restart bc_handler(struct hrtimer *t)
+{
+	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
+
+	if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX)
+		return HRTIMER_NORESTART;
+
+	return HRTIMER_RESTART;
+}
+
+void tick_setup_hrtimer_broadcast(void)
+{
+	hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	bctimer.function = bc_handler;
+	clockevents_register_device(&ce_broadcast_hrtimer);
+}
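
A platform whose per-cpu timers stop in deep idle but which lacks a genuine always-on broadcast timer registers this pseudo device once at boot. A sketch of the call site; the surrounding init function is hypothetical, tick_setup_hrtimer_broadcast() is the entry point added above:

	/* Sketch: wiring up the hrtimer broadcast from arch time init. */
	void __init example_time_init(void)	/* hypothetical */
	{
		/* Per-cpu clock event devices are registered first... */

		/*
		 * ...then the rating-0 pseudo device, which is picked as
		 * broadcast device only if no real hardware broadcast
		 * timer exists.
		 */
		tick_setup_hrtimer_broadcast();
	}
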
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 98977a57ac72..64c5990fd500 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -120,6 +120,19 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
 	return (dev && tick_broadcast_device.evtdev == dev);
 }
 
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
+{
+	int ret = -ENODEV;
+
+	if (tick_is_broadcast_device(dev)) {
+		raw_spin_lock(&tick_broadcast_lock);
+		ret = __clockevents_update_freq(dev, freq);
+		raw_spin_unlock(&tick_broadcast_lock);
+	}
+	return ret;
+}
+
+
 static void err_broadcast(const struct cpumask *mask)
 {
 	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
@@ -272,12 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask)
  */
 static void tick_do_periodic_broadcast(void)
 {
-	raw_spin_lock(&tick_broadcast_lock);
-
 	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
 	tick_do_broadcast(tmpmask);
-
-	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -287,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
 	ktime_t next;
 
+	raw_spin_lock(&tick_broadcast_lock);
+
 	tick_do_periodic_broadcast();
 
 	/*
 	 * The device is in periodic mode. No reprogramming necessary:
 	 */
 	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
-		return;
+		goto unlock;
 
 	/*
 	 * Setup the next period for devices, which do not have
@@ -306,9 +317,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 		next = ktime_add(next, tick_period);
 
 		if (!clockevents_program_event(dev, next, false))
-			return;
+			goto unlock;
 		tick_do_periodic_broadcast();
 	}
+unlock:
+	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -630,24 +643,61 @@ again:
 	raw_spin_unlock(&tick_broadcast_lock);
 }
 
+static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
+{
+	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+		return 0;
+	if (bc->next_event.tv64 == KTIME_MAX)
+		return 0;
+	return bc->bound_on == cpu ? -EBUSY : 0;
+}
+
+static void broadcast_shutdown_local(struct clock_event_device *bc,
+				     struct clock_event_device *dev)
+{
+	/*
+	 * For hrtimer based broadcasting we cannot shutdown the cpu
+	 * local device if our own event is the first one to expire or
+	 * if we own the broadcast timer.
+	 */
+	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
+		if (broadcast_needs_cpu(bc, smp_processor_id()))
+			return;
+		if (dev->next_event.tv64 < bc->next_event.tv64)
+			return;
+	}
+	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+}
+
+static void broadcast_move_bc(int deadcpu)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	if (!bc || !broadcast_needs_cpu(bc, deadcpu))
+		return;
+	/* This moves the broadcast assignment to this cpu */
+	clockevents_program_event(bc, bc->next_event, 1);
+}
+
 /*
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
  */
-void tick_broadcast_oneshot_control(unsigned long reason)
+int tick_broadcast_oneshot_control(unsigned long reason)
 {
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
 	unsigned long flags;
 	ktime_t now;
-	int cpu;
+	int cpu, ret = 0;
 
 	/*
 	 * Periodic mode does not care about the enter/exit of power
 	 * states
 	 */
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-		return;
+		return 0;
 
 	/*
 	 * We are called with preemption disabled from the depth of the
@@ -658,7 +708,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	dev = td->evtdev;
 
 	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
-		return;
+		return 0;
 
 	bc = tick_broadcast_device.evtdev;
 
@@ -666,7 +716,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
-			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+			broadcast_shutdown_local(bc, dev);
 			/*
 			 * We only reprogram the broadcast timer if we
 			 * did not mark ourself in the force mask and
@@ -679,6 +729,16 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 			    dev->next_event.tv64 < bc->next_event.tv64)
 				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
 		}
+		/*
+		 * If the current CPU owns the hrtimer broadcast
+		 * mechanism, it cannot go deep idle and we remove the
+		 * CPU from the broadcast mask. We don't have to go
+		 * through the EXIT path as the local timer is not
+		 * shutdown.
+		 */
+		ret = broadcast_needs_cpu(bc, cpu);
+		if (ret)
+			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
@@ -746,6 +806,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	}
 out:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+	return ret;
 }
 
 /*
@@ -852,6 +913,8 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
 
+	broadcast_move_bc(cpu);
+
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
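
The rule broadcast_needs_cpu() encodes: only an hrtimer-based pseudo device (CLOCK_EVT_FEAT_HRTIMER) ever pins a CPU, only while an event is armed (next_event != KTIME_MAX), and then exactly the CPU recorded in bound_on. A sketch that walks the online CPUs and reports who is pinned; the helper itself is hypothetical:

	/* Sketch: who is kept out of deep idle by the broadcast device? */
	static void report_broadcast_owner(struct clock_event_device *bc)
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (broadcast_needs_cpu(bc, cpu) == -EBUSY)
				pr_info("cpu%d owns the broadcast hrtimer\n", cpu);
		}
	}
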
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 20b2fe37d105..015661279b68 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -98,18 +98,19 @@ static void tick_periodic(int cpu)
 void tick_handle_periodic(struct clock_event_device *dev)
 {
 	int cpu = smp_processor_id();
-	ktime_t next;
+	ktime_t next = dev->next_event;
 
 	tick_periodic(cpu);
 
 	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
 		return;
-	/*
-	 * Setup the next period for devices, which do not have
-	 * periodic mode:
-	 */
-	next = ktime_add(dev->next_event, tick_period);
 	for (;;) {
+		/*
+		 * Setup the next period for devices, which do not have
+		 * periodic mode:
+		 */
+		next = ktime_add(next, tick_period);
+
 		if (!clockevents_program_event(dev, next, false))
 			return;
 		/*
@@ -118,12 +119,11 @@ void tick_handle_periodic(struct clock_event_device *dev)
 		 * to be sure we're using a real hardware clocksource.
 		 * Otherwise we could get trapped in an infinite
 		 * loop, as the tick_periodic() increments jiffies,
-		 * when then will increment time, posibly causing
+		 * which then will increment time, possibly causing
 		 * the loop to trigger again and again.
 		 */
 		if (timekeeping_valid_for_hres())
 			tick_periodic(cpu);
-		next = ktime_add(next, tick_period);
 	}
 }
 
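
With the rework, next always advances in tick_period steps from the event that just fired. As a worked example with HZ=100 (tick_period = 10 ms): if the event fired at t = 50 ms but the CPU only gets around to reprogramming once time has reached 73 ms, programming t = 60 ms and t = 70 ms each fail with -ETIME; the loop accounts each missed tick via tick_periodic() (guarded by timekeeping_valid_for_hres() so a jiffies-driven clocksource cannot livelock it) and finally succeeds at t = 80 ms.
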
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 8329669b51ec..7ab92b19965a 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -46,7 +46,7 @@ extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
 extern void tick_resume_oneshot(void);
 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
-extern void tick_broadcast_oneshot_control(unsigned long reason);
+extern int tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
@@ -58,7 +58,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
 	BUG();
 }
-static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
+static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
@@ -87,7 +87,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
 	BUG();
 }
-static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
+static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
@@ -111,6 +111,7 @@ extern int tick_resume_broadcast(void);
 extern void tick_broadcast_init(void);
 extern void
 tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq);
 
 #else /* !BROADCAST */
 
@@ -133,6 +134,8 @@ static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
 static inline void tick_suspend_broadcast(void) { }
 static inline int tick_resume_broadcast(void) { return 0; }
 static inline void tick_broadcast_init(void) { }
+static inline int tick_broadcast_update_freq(struct clock_event_device *dev,
+					     u32 freq) { return -ENODEV; }
 
 /*
  * Set the periodic handler in non broadcast mode
@@ -152,6 +155,8 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 	return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
 }
 
+int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
+
 #endif
 
 extern void do_timer(unsigned long ticks);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 0aa4ce81bc16..f7df8ea21707 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -22,6 +22,7 @@
 #include <linux/tick.h>
 #include <linux/stop_machine.h>
 #include <linux/pvclock_gtod.h>
+#include <linux/compiler.h>
 
 #include "tick-internal.h"
 #include "ntp_internal.h"
@@ -760,7 +761,7 @@ u64 timekeeping_max_deferment(void)
  *
  * XXX - Do be sure to remove it once all arches implement it.
  */
-void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
+void __weak read_persistent_clock(struct timespec *ts)
 {
 	ts->tv_sec = 0;
 	ts->tv_nsec = 0;
@@ -775,7 +776,7 @@ void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
  *
  * XXX - Do be sure to remove it once all arches implement it.
  */
-void __attribute__((weak)) read_boot_clock(struct timespec *ts)
+void __weak read_boot_clock(struct timespec *ts)
 {
 	ts->tv_sec = 0;
 	ts->tv_nsec = 0;
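
__weak, provided by <linux/compiler.h> (hence the new include above), expands to __attribute__((weak)); an architecture replaces the stub simply by defining a strong symbol with the same signature. A sketch of such an override, with a made-up RTC helper:

	/* Sketch: arch override of the __weak stub. */
	void read_persistent_clock(struct timespec *ts)
	{
		ts->tv_sec = example_rtc_read_seconds();	/* hypothetical */
		ts->tv_nsec = 0;
	}
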
@@ -1435,7 +1436,8 @@ void update_wall_time(void)
 out:
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 	if (clock_set)
-		clock_was_set();
+		/* Have to call _delayed version, since in irq context */
+		clock_was_set_delayed();
 }
 
 /**
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index 802433a4f5eb..4d54f97558df 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -21,6 +21,8 @@
 #include <linux/seq_file.h>
 #include <linux/time.h>
 
+#include "timekeeping_internal.h"
+
 static unsigned int sleep_time_bin[32] = {0};
 
 static int tk_debug_show_sleep_time(struct seq_file *s, void *data)