Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/rtc.c                 |  8
-rw-r--r--  arch/arm/common/bL_switcher.c           | 16
-rw-r--r--  arch/arm/include/asm/mach/time.h        |  3
-rw-r--r--  arch/arm/kernel/time.c                  |  6
-rw-r--r--  arch/arm/mach-omap2/cpuidle44xx.c       | 10
-rw-r--r--  arch/arm/mach-tegra/cpuidle-tegra114.c  |  6
-rw-r--r--  arch/arm/mach-tegra/cpuidle-tegra20.c   | 10
-rw-r--r--  arch/arm/mach-tegra/cpuidle-tegra30.c   | 10
-rw-r--r--  arch/arm/plat-omap/counter_32k.c        | 20
-rw-r--r--  arch/arm64/kernel/vdso.c                | 10
-rw-r--r--  arch/mips/lasat/sysctl.c                |  4
-rw-r--r--  arch/s390/kernel/time.c                 | 20
-rw-r--r--  arch/sparc/kernel/time_32.c             |  6
-rw-r--r--  arch/tile/kernel/time.c                 | 24
-rw-r--r--  arch/x86/kernel/process.c               | 13
-rw-r--r--  arch/x86/kernel/vsyscall_gtod.c         | 24
-rw-r--r--  arch/x86/kvm/x86.c                      | 14
-rw-r--r--  arch/x86/xen/suspend.c                  | 11
18 files changed, 92 insertions(+), 123 deletions(-)
diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
index c8d284d8521f..f535a3fd0f60 100644
--- a/arch/alpha/kernel/rtc.c
+++ b/arch/alpha/kernel/rtc.c
@@ -116,7 +116,7 @@ alpha_rtc_set_time(struct device *dev, struct rtc_time *tm)
 }
 
 static int
-alpha_rtc_set_mmss(struct device *dev, unsigned long nowtime)
+alpha_rtc_set_mmss(struct device *dev, time64_t nowtime)
 {
 	int retval = 0;
 	int real_seconds, real_minutes, cmos_minutes;
@@ -211,7 +211,7 @@ alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 static const struct rtc_class_ops alpha_rtc_ops = {
 	.read_time = alpha_rtc_read_time,
 	.set_time = alpha_rtc_set_time,
-	.set_mmss = alpha_rtc_set_mmss,
+	.set_mmss64 = alpha_rtc_set_mmss,
 	.ioctl = alpha_rtc_ioctl,
 };
 
@@ -276,7 +276,7 @@ do_remote_mmss(void *data)
 }
 
 static int
-remote_set_mmss(struct device *dev, unsigned long now)
+remote_set_mmss(struct device *dev, time64_t now)
 {
 	union remote_data x;
 	if (smp_processor_id() != boot_cpuid) {
@@ -290,7 +290,7 @@ remote_set_mmss(struct device *dev, unsigned long now)
 static const struct rtc_class_ops remote_rtc_ops = {
 	.read_time = remote_read_time,
 	.set_time = remote_set_time,
-	.set_mmss = remote_set_mmss,
+	.set_mmss64 = remote_set_mmss,
 	.ioctl = alpha_rtc_ioctl,
 };
 #endif
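
For reference, the .set_mmss64 hook receives the time as time64_t seconds instead of unsigned long, which keeps 32-bit configurations working past 2038. A minimal sketch of such a callback (hypothetical driver, not part of this patch):

#include <linux/rtc.h>

/* Hypothetical set_mmss64 callback: only minutes/seconds are written back. */
static int example_rtc_set_mmss64(struct device *dev, time64_t secs)
{
	struct rtc_time tm;

	rtc_time64_to_tm(secs, &tm);	/* no 2038 truncation on 32-bit */
	/* ... program tm.tm_min and tm.tm_sec into the device ... */
	return 0;
}

static const struct rtc_class_ops example_rtc_ops = {
	.set_mmss64 = example_rtc_set_mmss64,
};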
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 6eaddc47c43d..37dc0fe1093f 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -151,8 +151,6 @@ static int bL_switch_to(unsigned int new_cluster_id)
 	unsigned int mpidr, this_cpu, that_cpu;
 	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
 	struct completion inbound_alive;
-	struct tick_device *tdev;
-	enum clock_event_mode tdev_mode;
 	long volatile *handshake_ptr;
 	int ipi_nr, ret;
 
@@ -219,13 +217,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
 	/* redirect GIC's SGIs to our counterpart */
 	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
 
-	tdev = tick_get_device(this_cpu);
-	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
-		tdev = NULL;
-	if (tdev) {
-		tdev_mode = tdev->evtdev->mode;
-		clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
-	}
+	tick_suspend_local();
 
 	ret = cpu_pm_enter();
 
@@ -251,11 +243,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
 
 	ret = cpu_pm_exit();
 
-	if (tdev) {
-		clockevents_set_mode(tdev->evtdev, tdev_mode);
-		clockevents_program_event(tdev->evtdev,
-					  tdev->evtdev->next_event, 1);
-	}
+	tick_resume_local();
 
 	trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
 	local_fiq_enable();
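
The two helpers used above wrap what the switcher previously open-coded: tick_suspend_local() shuts the CPU-local tick device down, and tick_resume_local() brings it back and reprograms the next event. A hedged sketch of the resulting pattern in a low-power/migration path (everything except the tick and cpu_pm calls is illustrative):

#include <linux/tick.h>
#include <linux/cpu_pm.h>

static int example_low_power_transition(void)
{
	int ret;

	tick_suspend_local();	/* replaces manual clockevents shutdown */

	ret = cpu_pm_enter();
	if (!ret) {
		/* ... switch clusters or enter the low-power state ... */
		cpu_pm_exit();
	}

	tick_resume_local();	/* restores the tick and its next event */
	return ret;
}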
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 90c12e1e695c..0f79e4dec7f9 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -12,8 +12,7 @@
 
 extern void timer_tick(void);
 
-struct timespec;
-typedef void (*clock_access_fn)(struct timespec *);
+typedef void (*clock_access_fn)(struct timespec64 *);
 extern int register_persistent_clock(clock_access_fn read_boot,
 				      clock_access_fn read_persistent);
 
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 0cc7e58c47cc..a66e37e211a9 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -76,7 +76,7 @@ void timer_tick(void)
 }
 #endif
 
-static void dummy_clock_access(struct timespec *ts)
+static void dummy_clock_access(struct timespec64 *ts)
 {
 	ts->tv_sec = 0;
 	ts->tv_nsec = 0;
@@ -85,12 +85,12 @@ static void dummy_clock_access(struct timespec *ts)
 static clock_access_fn __read_persistent_clock = dummy_clock_access;
 static clock_access_fn __read_boot_clock = dummy_clock_access;;
 
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
 	__read_persistent_clock(ts);
 }
 
-void read_boot_clock(struct timespec *ts)
+void read_boot_clock64(struct timespec64 *ts)
 {
 	__read_boot_clock(ts);
 }
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 01e398a868bc..57d429830e09 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -14,7 +14,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
@@ -84,7 +84,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
 	struct idle_statedata *cx = state_ptr + index;
 	u32 mpuss_can_lose_context = 0;
-	int cpu_id = smp_processor_id();
 
 	/*
 	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -112,7 +111,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
 				 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+	tick_broadcast_enter();
 
 	/*
 	 * Call idle CPU PM enter notifier chain so that
@@ -169,7 +168,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	if (dev->cpu == 0 && mpuss_can_lose_context)
 		cpu_cluster_pm_exit();
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+	tick_broadcast_exit();
 
 fail:
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
@@ -184,8 +183,7 @@ fail:
  */
 static void omap_setup_broadcast_timer(void *arg)
 {
-	int cpu = smp_processor_id();
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+	tick_broadcast_enable();
 }
 
 static struct cpuidle_driver omap4_idle_driver = {
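
The same conversion repeats in the Tegra cpuidle drivers below: tick_broadcast_enter()/tick_broadcast_exit() replace the CLOCK_EVT_NOTIFY_BROADCAST_ENTER/EXIT notifications around states that may power down the per-CPU timer, and tick_broadcast_enable() replaces CLOCK_EVT_NOTIFY_BROADCAST_ON in the per-CPU setup call. A hedged sketch of the idle-handler pattern (the platform suspend step is illustrative):

#include <linux/tick.h>
#include <linux/cpuidle.h>

/* Illustrative deep-idle handler: hand timekeeping to the broadcast device
 * while the local timer may be off, then take it back on wakeup. */
static int example_enter_deep_idle(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	tick_broadcast_enter();

	/* ... cpu_suspend() or other platform low-power entry ... */

	tick_broadcast_exit();

	return index;
}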
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
index f2b586d7b15d..155807fa6fdd 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra114.c
@@ -15,7 +15,7 @@
  */
 
 #include <asm/firmware.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -44,7 +44,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
 	tegra_set_cpu_in_lp2();
 	cpu_pm_enter();
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+	tick_broadcast_enter();
 
 	call_firmware_op(prepare_idle);
 
@@ -52,7 +52,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
 	if (call_firmware_op(do_idle, 0) == -ENOSYS)
 		cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+	tick_broadcast_exit();
 
 	cpu_pm_exit();
 	tegra_clear_cpu_in_lp2();
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 4f25a7c7ca0f..48844ae6c3a1 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -20,7 +20,7 @@
  */
 
 #include <linux/clk/tegra.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -136,11 +136,11 @@ static bool tegra20_cpu_cluster_power_down(struct cpuidle_device *dev,
 	if (tegra20_reset_cpu_1() || !tegra_cpu_rail_off_ready())
 		return false;
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+	tick_broadcast_enter();
 
 	tegra_idle_lp2_last();
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+	tick_broadcast_exit();
 
 	if (cpu_online(1))
 		tegra20_wake_cpu1_from_reset();
@@ -153,13 +153,13 @@ static bool tegra20_idle_enter_lp2_cpu_1(struct cpuidle_device *dev,
 					 struct cpuidle_driver *drv,
 					 int index)
 {
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+	tick_broadcast_enter();
 
 	cpu_suspend(0, tegra20_sleep_cpu_secondary_finish);
 
 	tegra20_cpu_clear_resettable();
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+	tick_broadcast_exit();
 
 	return true;
 }
diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c
index f8815ed65d9d..84d809a3cba3 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra30.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra30.c
@@ -20,7 +20,7 @@
  */
 
 #include <linux/clk/tegra.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -76,11 +76,11 @@ static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev,
 		return false;
 	}
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+	tick_broadcast_enter();
 
 	tegra_idle_lp2_last();
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+	tick_broadcast_exit();
 
 	return true;
 }
@@ -90,13 +90,13 @@ static bool tegra30_cpu_core_power_down(struct cpuidle_device *dev,
 					struct cpuidle_driver *drv,
 					int index)
 {
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+	tick_broadcast_enter();
 
 	smp_wmb();
 
 	cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+	tick_broadcast_exit();
 
 	return true;
 }
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 61b4d705c267..2438b96004c1 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -44,24 +44,20 @@ static u64 notrace omap_32k_read_sched_clock(void)
 }
 
 /**
- * omap_read_persistent_clock - Return time from a persistent clock.
+ * omap_read_persistent_clock64 - Return time from a persistent clock.
  *
  * Reads the time from a source which isn't disabled during PM, the
  * 32k sync timer. Convert the cycles elapsed since last read into
- * nsecs and adds to a monotonically increasing timespec.
+ * nsecs and adds to a monotonically increasing timespec64.
  */
-static struct timespec persistent_ts;
+static struct timespec64 persistent_ts;
 static cycles_t cycles;
 static unsigned int persistent_mult, persistent_shift;
-static DEFINE_SPINLOCK(read_persistent_clock_lock);
 
-static void omap_read_persistent_clock(struct timespec *ts)
+static void omap_read_persistent_clock64(struct timespec64 *ts)
 {
 	unsigned long long nsecs;
 	cycles_t last_cycles;
-	unsigned long flags;
-
-	spin_lock_irqsave(&read_persistent_clock_lock, flags);
 
 	last_cycles = cycles;
 	cycles = sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
@@ -69,11 +65,9 @@ static void omap_read_persistent_clock(struct timespec *ts)
 	nsecs = clocksource_cyc2ns(cycles - last_cycles,
 				   persistent_mult, persistent_shift);
 
-	timespec_add_ns(&persistent_ts, nsecs);
+	timespec64_add_ns(&persistent_ts, nsecs);
 
 	*ts = persistent_ts;
-
-	spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
 }
 
 /**
@@ -103,7 +97,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
 
 	/*
 	 * 120000 rough estimate from the calculations in
-	 * __clocksource_updatefreq_scale.
+	 * __clocksource_update_freq_scale.
 	 */
 	clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
 			32768, NSEC_PER_SEC, 120000);
@@ -116,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
 	}
 
 	sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
-	register_persistent_clock(NULL, omap_read_persistent_clock);
+	register_persistent_clock(NULL, omap_read_persistent_clock64);
 	pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
 
 	return 0;
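
The OMAP change also shows the 64-bit persistent-clock interface: the platform hook now fills a timespec64 and is wired up behind read_persistent_clock64(). A minimal sketch of a timespec64-based persistent clock read, assuming a hypothetical counter helper:

#include <linux/time64.h>

/* Stand-in for reading a counter that keeps running across suspend
 * (hypothetical hardware access). */
static u64 example_counter_ns(void)
{
	return 0;
}

/* Illustrative 64-bit persistent clock read. */
static void example_read_persistent_clock64(struct timespec64 *ts)
{
	*ts = ns_to_timespec64(example_counter_ns());
}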
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 32aeea083d93..ec37ab3f524f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -200,7 +200,7 @@ up_fail:
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct timespec xtime_coarse;
-	u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");
+	u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
 
 	++vdso_data->tb_seq_count;
 	smp_wmb();
@@ -213,11 +213,11 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 
 	if (!use_syscall) {
-		vdso_data->cs_cycle_last = tk->tkr.cycle_last;
+		vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
 		vdso_data->xtime_clock_sec = tk->xtime_sec;
-		vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
-		vdso_data->cs_mult = tk->tkr.mult;
-		vdso_data->cs_shift = tk->tkr.shift;
+		vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
+		vdso_data->cs_mult = tk->tkr_mono.mult;
+		vdso_data->cs_shift = tk->tkr_mono.shift;
 	}
 
 	smp_wmb();
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index 3b7f65cc4218..cf9b4633257e 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -75,11 +75,11 @@ static int rtctmp;
 int proc_dolasatrtc(struct ctl_table *table, int write,
 		       void *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct timespec ts;
+	struct timespec64 ts;
 	int r;
 
 	if (!write) {
-		read_persistent_clock(&ts);
+		read_persistent_clock64(&ts);
 		rtctmp = ts.tv_sec;
 		/* check for time < 0 and set to 0 */
 		if (rtctmp < 0)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 20660dddb2d6..170ddd2018b3 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -215,20 +215,20 @@ void update_vsyscall(struct timekeeper *tk)
 {
 	u64 nsecps;
 
-	if (tk->tkr.clock != &clocksource_tod)
+	if (tk->tkr_mono.clock != &clocksource_tod)
 		return;
 
 	/* Make userspace gettimeofday spin until we're done. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
+	vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
 	vdso_data->xtime_clock_sec = tk->xtime_sec;
-	vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
+	vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
 	vdso_data->wtom_clock_sec =
 		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-	vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
-		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
-	nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
+	vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
+		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+	nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
 	while (vdso_data->wtom_clock_nsec >= nsecps) {
 		vdso_data->wtom_clock_nsec -= nsecps;
 		vdso_data->wtom_clock_sec++;
@@ -236,7 +236,7 @@ void update_vsyscall(struct timekeeper *tk)
 
 	vdso_data->xtime_coarse_sec = tk->xtime_sec;
 	vdso_data->xtime_coarse_nsec =
-		(long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+		(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 	vdso_data->wtom_coarse_sec =
 		vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
 	vdso_data->wtom_coarse_nsec =
@@ -246,8 +246,8 @@ void update_vsyscall(struct timekeeper *tk)
 		vdso_data->wtom_coarse_sec++;
 	}
 
-	vdso_data->tk_mult = tk->tkr.mult;
-	vdso_data->tk_shift = tk->tkr.shift;
+	vdso_data->tk_mult = tk->tkr_mono.mult;
+	vdso_data->tk_shift = tk->tkr_mono.shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
@@ -283,7 +283,7 @@ void __init time_init(void)
 	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
 		panic("Couldn't request external interrupt 0x1406");
 
-	if (clocksource_register(&clocksource_tod) != 0)
+	if (__clocksource_register(&clocksource_tod) != 0)
 		panic("Could not register TOD clock source");
 
 	/* Enable TOD clock interrupts on the boot cpu. */
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 2f80d23a0a44..18147a5523d9 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -181,17 +181,13 @@ static struct clocksource timer_cs = {
 	.rating = 100,
 	.read = timer_cs_read,
 	.mask = CLOCKSOURCE_MASK(64),
-	.shift = 2,
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static __init int setup_timer_cs(void)
 {
 	timer_cs_enabled = 1;
-	timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
-					    timer_cs.shift);
-
-	return clocksource_register(&timer_cs);
+	return clocksource_register_hz(&timer_cs, sparc_config.clock_rate);
 }
 
 #ifdef CONFIG_SMP
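
The sparc change drops the hand-computed mult/shift pair: clocksource_register_hz() takes the input frequency and lets the clocksource core derive the conversion factors, which is also why the .shift initializer goes away. A hedged sketch of that registration pattern with a hypothetical clocksource:

#include <linux/clocksource.h>

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 100,
	.read	= NULL,			/* platform read callback goes here */
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(void)
{
	/* mult/shift are computed by the core from the given frequency */
	return clocksource_register_hz(&example_cs, 32768);
}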
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index d412b0856c0a..00178ecf9aea 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -257,34 +257,34 @@ void update_vsyscall_tz(void)
 
 void update_vsyscall(struct timekeeper *tk)
 {
-	if (tk->tkr.clock != &cycle_counter_cs)
+	if (tk->tkr_mono.clock != &cycle_counter_cs)
 		return;
 
 	write_seqcount_begin(&vdso_data->tb_seq);
 
-	vdso_data->cycle_last = tk->tkr.cycle_last;
-	vdso_data->mask = tk->tkr.mask;
-	vdso_data->mult = tk->tkr.mult;
-	vdso_data->shift = tk->tkr.shift;
+	vdso_data->cycle_last = tk->tkr_mono.cycle_last;
+	vdso_data->mask = tk->tkr_mono.mask;
+	vdso_data->mult = tk->tkr_mono.mult;
+	vdso_data->shift = tk->tkr_mono.shift;
 
 	vdso_data->wall_time_sec = tk->xtime_sec;
-	vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
+	vdso_data->wall_time_snsec = tk->tkr_mono.xtime_nsec;
 
 	vdso_data->monotonic_time_sec = tk->xtime_sec
 					+ tk->wall_to_monotonic.tv_sec;
-	vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+	vdso_data->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
 					+ ((u64)tk->wall_to_monotonic.tv_nsec
-					<< tk->tkr.shift);
+					<< tk->tkr_mono.shift);
 	while (vdso_data->monotonic_time_snsec >=
-		(((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+		(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
 		vdso_data->monotonic_time_snsec -=
-			((u64)NSEC_PER_SEC) << tk->tkr.shift;
+			((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
 		vdso_data->monotonic_time_sec++;
 	}
 
 	vdso_data->wall_time_coarse_sec = tk->xtime_sec;
-	vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
-						tk->tkr.shift);
+	vdso_data->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
+						tk->tkr_mono.shift);
 
 	vdso_data->monotonic_time_coarse_sec =
 		vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index a388bb883128..7af7b6478637 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -9,7 +9,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/pm.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/random.h>
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
@@ -378,14 +378,11 @@ static void amd_e400_idle(void)
 
 	if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
 		cpumask_set_cpu(cpu, amd_e400_c1e_mask);
-		/*
-		 * Force broadcast so ACPI can not interfere.
-		 */
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
-				   &cpu);
+		/* Force broadcast so ACPI can not interfere. */
+		tick_broadcast_force();
 		pr_info("Switch to broadcast mode on CPU%d\n", cpu);
 	}
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+	tick_broadcast_enter();
 
 	default_idle();
 
@@ -394,7 +391,7 @@ static void amd_e400_idle(void)
 	 * called with interrupts disabled.
 	 */
 	local_irq_disable();
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+	tick_broadcast_exit();
 	local_irq_enable();
 	} else
 		default_idle();
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index c7d791f32b98..51e330416995 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -31,30 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
 	gtod_write_begin(vdata);
 
 	/* copy vsyscall data */
-	vdata->vclock_mode = tk->tkr.clock->archdata.vclock_mode;
-	vdata->cycle_last = tk->tkr.cycle_last;
-	vdata->mask = tk->tkr.mask;
-	vdata->mult = tk->tkr.mult;
-	vdata->shift = tk->tkr.shift;
+	vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+	vdata->cycle_last = tk->tkr_mono.cycle_last;
+	vdata->mask = tk->tkr_mono.mask;
+	vdata->mult = tk->tkr_mono.mult;
+	vdata->shift = tk->tkr_mono.shift;
 
 	vdata->wall_time_sec = tk->xtime_sec;
-	vdata->wall_time_snsec = tk->tkr.xtime_nsec;
+	vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
 
 	vdata->monotonic_time_sec = tk->xtime_sec
 					+ tk->wall_to_monotonic.tv_sec;
-	vdata->monotonic_time_snsec = tk->tkr.xtime_nsec
+	vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
 					+ ((u64)tk->wall_to_monotonic.tv_nsec
-					<< tk->tkr.shift);
+					<< tk->tkr_mono.shift);
 	while (vdata->monotonic_time_snsec >=
-		  (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+		  (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
 		vdata->monotonic_time_snsec -=
-			((u64)NSEC_PER_SEC) << tk->tkr.shift;
+			((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
 		vdata->monotonic_time_sec++;
 	}
 
 	vdata->wall_time_coarse_sec = tk->xtime_sec;
-	vdata->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
-		tk->tkr.shift);
+	vdata->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
+		tk->tkr_mono.shift);
 
 	vdata->monotonic_time_coarse_sec =
 		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b2dd030ea3b..e1a81267f3f6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1081,19 +1081,19 @@ static void update_pvclock_gtod(struct timekeeper *tk)
 	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
 	u64 boot_ns;
 
-	boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
+	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
 
 	write_seqcount_begin(&vdata->seq);
 
 	/* copy pvclock gtod data */
-	vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
-	vdata->clock.cycle_last = tk->tkr.cycle_last;
-	vdata->clock.mask = tk->tkr.mask;
-	vdata->clock.mult = tk->tkr.mult;
-	vdata->clock.shift = tk->tkr.shift;
+	vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+	vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
+	vdata->clock.mask = tk->tkr_mono.mask;
+	vdata->clock.mult = tk->tkr_mono.mult;
+	vdata->clock.shift = tk->tkr_mono.shift;
 
 	vdata->boot_ns = boot_ns;
-	vdata->nsec_base = tk->tkr.xtime_nsec;
+	vdata->nsec_base = tk->tkr_mono.xtime_nsec;
 
 	write_seqcount_end(&vdata->seq);
 }
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index c4df9dbd63b7..d9497698645a 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,5 +1,5 @@
 #include <linux/types.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 
 #include <xen/interface/xen.h>
 #include <xen/grant_table.h>
@@ -81,17 +81,14 @@ void xen_arch_post_suspend(int cancelled)
 
 static void xen_vcpu_notify_restore(void *data)
 {
-	unsigned long reason = (unsigned long)data;
-
 	/* Boot processor notified via generic timekeeping_resume() */
-	if ( smp_processor_id() == 0)
+	if (smp_processor_id() == 0)
 		return;
 
-	clockevents_notify(reason, NULL);
+	tick_resume_local();
 }
 
 void xen_arch_resume(void)
 {
-	on_each_cpu(xen_vcpu_notify_restore,
-		    (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
+	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
 }