-rw-r--r--   include/linux/clockchips.h    142
-rw-r--r--   include/linux/hrtimer.h         5
-rw-r--r--   kernel/hrtimer.c               13
-rw-r--r--   kernel/time/Makefile            2
-rw-r--r--   kernel/time/clockevents.c     345
-rw-r--r--   kernel/timer.c                  4
6 files changed, 497 insertions, 14 deletions
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
new file mode 100644
index 000000000000..4ea7e7bcfafe
--- /dev/null
+++ b/include/linux/clockchips.h
@@ -0,0 +1,142 @@
+/* linux/include/linux/clockchips.h
+ *
+ * This file contains the structure definitions for clockchips.
+ *
+ * If you are not a clockchip, or the time of day code, you should
+ * not be including this file!
+ */
+#ifndef _LINUX_CLOCKCHIPS_H
+#define _LINUX_CLOCKCHIPS_H
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+
+#include <linux/clocksource.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
+#include <linux/notifier.h>
+
+struct clock_event_device;
+
+/* Clock event mode commands */
+enum clock_event_mode {
+        CLOCK_EVT_MODE_UNUSED = 0,
+        CLOCK_EVT_MODE_SHUTDOWN,
+        CLOCK_EVT_MODE_PERIODIC,
+        CLOCK_EVT_MODE_ONESHOT,
+};
+
+/* Clock event notification values */
+enum clock_event_notifiers {
+        CLOCK_EVT_NOTIFY_ADD,
+        CLOCK_EVT_NOTIFY_BROADCAST_ON,
+        CLOCK_EVT_NOTIFY_BROADCAST_OFF,
+        CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+        CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+        CLOCK_EVT_NOTIFY_SUSPEND,
+        CLOCK_EVT_NOTIFY_RESUME,
+        CLOCK_EVT_NOTIFY_CPU_DEAD,
+};
+
+/*
+ * Clock event features
+ */
+#define CLOCK_EVT_FEAT_PERIODIC        0x000001
+#define CLOCK_EVT_FEAT_ONESHOT         0x000002
+/*
+ * x86(64) specific misfeatures:
+ *
+ * - Clockevent source stops in C3 State and needs broadcast support.
+ * - Local APIC timer is used as a dummy device.
+ */
+#define CLOCK_EVT_FEAT_C3STOP          0x000004
+#define CLOCK_EVT_FEAT_DUMMY           0x000008
+
+/**
+ * struct clock_event_device - clock event device descriptor
+ * @name: ptr to clock event name
+ * @features: features of this clock event device
+ * @max_delta_ns: maximum delta value in ns
+ * @min_delta_ns: minimum delta value in ns
+ * @mult: nanosecond to cycles multiplier
+ * @shift: nanoseconds to cycles divisor (power of two)
+ * @rating: variable to rate clock event devices
+ * @irq: irq number (only for non cpu local devices)
+ * @cpumask: cpumask to indicate for which cpus this device works
+ * @set_next_event: set next event
+ * @set_mode: set mode function
+ * @event_handler: Assigned by the framework to be called by the low
+ *                 level handler of the event source
+ * @broadcast: function to broadcast events
+ * @list: list head for the management code
+ * @mode: operating mode assigned by the management code
+ * @next_event: local storage for the next event in oneshot mode
+ */
+struct clock_event_device {
+        const char              *name;
+        unsigned int            features;
+        unsigned long           max_delta_ns;
+        unsigned long           min_delta_ns;
+        unsigned long           mult;
+        int                     shift;
+        int                     rating;
+        int                     irq;
+        cpumask_t               cpumask;
+        int                     (*set_next_event)(unsigned long evt,
+                                                  struct clock_event_device *);
+        void                    (*set_mode)(enum clock_event_mode mode,
+                                            struct clock_event_device *);
+        void                    (*event_handler)(struct clock_event_device *);
+        void                    (*broadcast)(cpumask_t mask);
+        struct list_head        list;
+        enum clock_event_mode   mode;
+        ktime_t                 next_event;
+};
+
+/*
+ * Calculate a multiplication factor for scaled math, which is used to convert
+ * nanoseconds based values to clock ticks:
+ *
+ * clock_ticks = (nanoseconds * factor) >> shift.
+ *
+ * div_sc is the rearranged equation to calculate a factor from a given clock
+ * ticks / nanoseconds ratio:
+ *
+ * factor = (clock_ticks << shift) / nanoseconds
+ */
+static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
+                                   int shift)
+{
+        uint64_t tmp = ((uint64_t)ticks) << shift;
+
+        do_div(tmp, nsec);
+        return (unsigned long) tmp;
+}
+
+/* Clock event layer functions */
+extern unsigned long clockevent_delta2ns(unsigned long latch,
+                                         struct clock_event_device *evt);
+extern void clockevents_register_device(struct clock_event_device *dev);
+
+extern void clockevents_exchange_device(struct clock_event_device *old,
+                                        struct clock_event_device *new);
+extern
+struct clock_event_device *clockevents_request_device(unsigned int features,
+                                                      cpumask_t cpumask);
+extern void clockevents_release_device(struct clock_event_device *dev);
+extern void clockevents_set_mode(struct clock_event_device *dev,
+                                 enum clock_event_mode mode);
+extern int clockevents_register_notifier(struct notifier_block *nb);
+extern void clockevents_unregister_notifier(struct notifier_block *nb);
+extern int clockevents_program_event(struct clock_event_device *dev,
+                                     ktime_t expires, ktime_t now);
+
+extern void clockevents_notify(unsigned long reason, void *arg);
+
+#else
+
+static inline void clockevents_resume_events(void) { }
+#define clockevents_notify(reason, arg) do { } while (0)
+
+#endif
+
+#endif
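
The header above is the complete driver-facing API of the new framework. For orientation, here is a minimal sketch of how a per-CPU timer driver might fill in a struct clock_event_device and register it. The examplehw names, the 1 MHz input clock, the 16 bit counter limit and the shift value are invented for the illustration; only the clockchips.h interfaces come from this patch.

/*
 * Hypothetical driver sketch: the examplehw_* names, frequency and
 * counter width are assumptions, not part of this patch.
 */
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/time.h>

#define EXAMPLEHW_FREQ        1000000        /* assumed 1 MHz input clock */

static int examplehw_set_next_event(unsigned long delta,
                                    struct clock_event_device *evt)
{
        /* program the hardware comparator 'delta' ticks into the future */
        return 0;
}

static void examplehw_set_mode(enum clock_event_mode mode,
                               struct clock_event_device *evt)
{
        /* switch the hardware between periodic, oneshot and shutdown */
}

static struct clock_event_device examplehw_clockevent = {
        .name           = "examplehw",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .shift          = 32,
        .rating         = 200,
        .irq            = -1,
        .set_next_event = examplehw_set_next_event,
        .set_mode       = examplehw_set_mode,
};

static void __init examplehw_clockevent_init(void)
{
        /* ticks = (ns * mult) >> shift, so mult = (freq << shift) / NSEC_PER_SEC */
        examplehw_clockevent.mult = div_sc(EXAMPLEHW_FREQ, NSEC_PER_SEC,
                                           examplehw_clockevent.shift);
        /* bound the programmable range: 16 bit counter, at least 2 ticks */
        examplehw_clockevent.max_delta_ns =
                clockevent_delta2ns(0xffff, &examplehw_clockevent);
        examplehw_clockevent.min_delta_ns =
                clockevent_delta2ns(2, &examplehw_clockevent);
        examplehw_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());

        clockevents_register_device(&examplehw_clockevent);
}

The expensive division happens once at init time in div_sc(); after that the core only needs a multiply and a shift when programming an event.
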
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 9041405d0b71..a759636fd09f 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -144,6 +144,8 @@ struct hrtimer_cpu_base {
  * is expired in the next softirq when the clock was advanced.
  */
 #define clock_was_set()                do { } while (0)
+extern ktime_t ktime_get(void);
+extern ktime_t ktime_get_real(void);
 
 /* Exported timer functions: */
 
@@ -196,9 +198,6 @@ extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 /* Soft interrupt function to run the hrtimer queues: */
 extern void hrtimer_run_queues(void);
 
-/* Resume notification */
-void hrtimer_notify_resume(void);
-
 /* Bootup initialization: */
 extern void __init hrtimers_init(void);
 
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index eca0f5593a75..a2310d1bebe1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -46,7 +46,7 @@
  *
  * returns the time in ktime_t format
  */
-static ktime_t ktime_get(void)
+ktime_t ktime_get(void)
 {
         struct timespec now;
 
@@ -60,7 +60,7 @@ static ktime_t ktime_get(void)
  *
  * returns the time in ktime_t format
  */
-static ktime_t ktime_get_real(void)
+ktime_t ktime_get_real(void)
 {
         struct timespec now;
 
@@ -311,14 +311,6 @@ static unsigned long ktime_divns(const ktime_t kt, s64 div)
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
- * Timekeeping resumed notification
- */
-void hrtimer_notify_resume(void)
-{
-        clock_was_set();
-}
-
-/*
  * Counterpart to lock_timer_base above:
  */
 static inline
@@ -889,6 +881,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 
 #ifdef CONFIG_HOTPLUG_CPU
         case CPU_DEAD:
+                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
                 migrate_hrtimers(cpu);
                 break;
 #endif
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 61a3907d16fb..2bf1805914d0 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1 +1,3 @@
 obj-y += ntp.o clocksource.o jiffies.o
+
+obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
new file mode 100644
index 000000000000..67932ea78c17
--- /dev/null
+++ b/kernel/time/clockevents.c
@@ -0,0 +1,345 @@
+/*
+ * linux/kernel/time/clockevents.c
+ *
+ * This file contains functions which manage clock event devices.
+ *
+ * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
+ * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
+ *
+ * This code is licenced under the GPL version 2. For details see
+ * kernel-base/COPYING.
+ */
+
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/smp.h>
+#include <linux/sysdev.h>
+
+/* The registered clock event devices */
+static LIST_HEAD(clockevent_devices);
+static LIST_HEAD(clockevents_released);
+
+/* Notification for clock events */
+static RAW_NOTIFIER_HEAD(clockevents_chain);
+
+/* Protection for the above */
+static DEFINE_SPINLOCK(clockevents_lock);
+
+/**
+ * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
+ * @latch: value to convert
+ * @evt: pointer to clock event device descriptor
+ *
+ * Math helper, returns latch value converted to nanoseconds (bound checked)
+ */
+unsigned long clockevent_delta2ns(unsigned long latch,
+                                  struct clock_event_device *evt)
+{
+        u64 clc = ((u64) latch << evt->shift);
+
+        do_div(clc, evt->mult);
+        if (clc < 1000)
+                clc = 1000;
+        if (clc > LONG_MAX)
+                clc = LONG_MAX;
+
+        return (unsigned long) clc;
+}
+
+/**
+ * clockevents_set_mode - set the operating mode of a clock event device
+ * @dev: device to modify
+ * @mode: new mode
+ *
+ * Must be called with interrupts disabled !
+ */
+void clockevents_set_mode(struct clock_event_device *dev,
+                          enum clock_event_mode mode)
+{
+        if (dev->mode != mode) {
+                dev->set_mode(mode, dev);
+                dev->mode = mode;
+        }
+}
+
+/**
+ * clockevents_program_event - Reprogram the clock event device.
+ * @expires: absolute expiry time (monotonic clock)
+ *
+ * Returns 0 on success, -ETIME when the event is in the past.
+ */
+int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
+                              ktime_t now)
+{
+        unsigned long long clc;
+        int64_t delta;
+
+        delta = ktime_to_ns(ktime_sub(expires, now));
+
+        if (delta <= 0)
+                return -ETIME;
+
+        dev->next_event = expires;
+
+        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
+                return 0;
+
+        if (delta > dev->max_delta_ns)
+                delta = dev->max_delta_ns;
+        if (delta < dev->min_delta_ns)
+                delta = dev->min_delta_ns;
+
+        clc = delta * dev->mult;
+        clc >>= dev->shift;
+
+        return dev->set_next_event((unsigned long) clc, dev);
+}
+
+/**
+ * clockevents_register_notifier - register a clock events change listener
+ */
+int clockevents_register_notifier(struct notifier_block *nb)
+{
+        int ret;
+
+        spin_lock(&clockevents_lock);
+        ret = raw_notifier_chain_register(&clockevents_chain, nb);
+        spin_unlock(&clockevents_lock);
+
+        return ret;
+}
+
+/**
+ * clockevents_unregister_notifier - unregister a clock events change listener
+ */
+void clockevents_unregister_notifier(struct notifier_block *nb)
+{
+        spin_lock(&clockevents_lock);
+        raw_notifier_chain_unregister(&clockevents_chain, nb);
+        spin_unlock(&clockevents_lock);
+}
+
+/*
+ * Notify about a clock event change. Called with clockevents_lock
+ * held.
+ */
+static void clockevents_do_notify(unsigned long reason, void *dev)
+{
+        raw_notifier_call_chain(&clockevents_chain, reason, dev);
+}
+
+/*
+ * Called after a notify add to make devices available which were
+ * released from the notifier call.
+ */
+static void clockevents_notify_released(void)
+{
+        struct clock_event_device *dev;
+
+        while (!list_empty(&clockevents_released)) {
+                dev = list_entry(clockevents_released.next,
+                                 struct clock_event_device, list);
+                list_del(&dev->list);
+                list_add(&dev->list, &clockevent_devices);
+                clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
+        }
+}
+
+/**
+ * clockevents_register_device - register a clock event device
+ * @dev: device to register
+ */
+void clockevents_register_device(struct clock_event_device *dev)
+{
+        BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+
+        spin_lock(&clockevents_lock);
+
+        list_add(&dev->list, &clockevent_devices);
+        clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
+        clockevents_notify_released();
+
+        spin_unlock(&clockevents_lock);
+}
+
+/*
+ * Noop handler when we shut down an event device
+ */
+static void clockevents_handle_noop(struct clock_event_device *dev)
+{
+}
+
+/**
+ * clockevents_exchange_device - release and request clock devices
+ * @old: device to release (can be NULL)
+ * @new: device to request (can be NULL)
+ *
+ * Called from the notifier chain. clockevents_lock is held already
+ */
+void clockevents_exchange_device(struct clock_event_device *old,
+                                 struct clock_event_device *new)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        /*
+         * Caller releases a clock event device. We queue it into the
+         * released list and do a notify add later.
+         */
+        if (old) {
+                old->event_handler = clockevents_handle_noop;
+                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
+                list_del(&old->list);
+                list_add(&old->list, &clockevents_released);
+        }
+
+        if (new) {
+                BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
+                clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
+        }
+        local_irq_restore(flags);
+}
+
+/**
+ * clockevents_request_device
+ */
+struct clock_event_device *clockevents_request_device(unsigned int features,
+                                                      cpumask_t cpumask)
+{
+        struct clock_event_device *cur, *dev = NULL;
+        struct list_head *tmp;
+
+        spin_lock(&clockevents_lock);
+
+        list_for_each(tmp, &clockevent_devices) {
+                cur = list_entry(tmp, struct clock_event_device, list);
+
+                if ((cur->features & features) == features &&
+                    cpus_equal(cpumask, cur->cpumask)) {
+                        if (!dev || dev->rating < cur->rating)
+                                dev = cur;
+                }
+        }
+
+        clockevents_exchange_device(NULL, dev);
+
+        spin_unlock(&clockevents_lock);
+
+        return dev;
+}
+
+/**
+ * clockevents_release_device
+ */
+void clockevents_release_device(struct clock_event_device *dev)
+{
+        spin_lock(&clockevents_lock);
+
+        clockevents_exchange_device(dev, NULL);
+        clockevents_notify_released();
+
+        spin_unlock(&clockevents_lock);
+}
+
+/**
+ * clockevents_notify - notification about relevant events
+ */
+void clockevents_notify(unsigned long reason, void *arg)
+{
+        spin_lock(&clockevents_lock);
+        clockevents_do_notify(reason, arg);
+
+        switch (reason) {
+        case CLOCK_EVT_NOTIFY_CPU_DEAD:
+                /*
+                 * Unregister the clock event devices which were
+                 * released from the users in the notify chain.
+                 */
+                while (!list_empty(&clockevents_released)) {
+                        struct clock_event_device *dev;
+
+                        dev = list_entry(clockevents_released.next,
+                                         struct clock_event_device, list);
+                        list_del(&dev->list);
+                }
+                break;
+        default:
+                break;
+        }
+        spin_unlock(&clockevents_lock);
+}
+EXPORT_SYMBOL_GPL(clockevents_notify);
+
+#ifdef CONFIG_SYSFS
+
+/**
+ * clockevents_show_registered - sysfs interface for listing clockevents
+ * @dev: unused
+ * @buf: char buffer to be filled with clock events list
+ *
+ * Provides sysfs interface for listing registered clock event devices
+ */
+static ssize_t clockevents_show_registered(struct sys_device *dev, char *buf)
+{
+        struct list_head *tmp;
+        char *p = buf;
+        int cpu;
+
+        spin_lock(&clockevents_lock);
+
+        list_for_each(tmp, &clockevent_devices) {
+                struct clock_event_device *ce;
+
+                ce = list_entry(tmp, struct clock_event_device, list);
+                p += sprintf(p, "%-20s F:%04x M:%d", ce->name,
+                             ce->features, ce->mode);
+                p += sprintf(p, " C:");
+                if (!cpus_equal(ce->cpumask, cpu_possible_map)) {
+                        for_each_cpu_mask(cpu, ce->cpumask)
+                                p += sprintf(p, " %d", cpu);
+                } else {
+                        /*
+                         * FIXME: Add the cpu which is handling this sucker
+                         */
+                }
+                p += sprintf(p, "\n");
+        }
+
+        spin_unlock(&clockevents_lock);
+
+        return p - buf;
+}
+
+/*
+ * Sysfs setup bits:
+ */
+static SYSDEV_ATTR(registered, 0600,
+                   clockevents_show_registered, NULL);
+
+static struct sysdev_class clockevents_sysclass = {
+        set_kset_name("clockevents"),
+};
+
+static struct sys_device clockevents_sys_device = {
+        .id  = 0,
+        .cls = &clockevents_sysclass,
+};
+
+static int __init clockevents_sysfs_init(void)
+{
+        int error = sysdev_class_register(&clockevents_sysclass);
+
+        if (!error)
+                error = sysdev_register(&clockevents_sys_device);
+        if (!error)
+                error = sysdev_create_file(
+                                &clockevents_sys_device,
+                                &attr_registered);
+        return error;
+}
+device_initcall(clockevents_sysfs_init);
+#endif
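
clockevents_program_event() and clockevent_delta2ns() above are the two directions of the same mult/shift conversion. The stand-alone user-space sketch below reruns that arithmetic for an assumed 1 MHz device with a shift of 32; the frequency and shift are assumptions chosen for the example, the formulas are the ones used in the patch.

/* Stand-alone illustration of the mult/shift arithmetic; the 1 MHz
 * clock and shift of 32 are assumptions, not taken from the patch. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint64_t freq  = 1000000;       /* assumed device clock: 1 MHz */
        int shift      = 32;

        /* factor = (clock_ticks << shift) / nanoseconds, as in div_sc() */
        uint64_t mult = (freq << shift) / NSEC_PER_SEC;

        /* clockevents_program_event(): delta in ns -> device ticks */
        uint64_t delta_ns = 1000000;    /* program an event 1 ms ahead */
        uint64_t ticks = (delta_ns * mult) >> shift;

        /* clockevent_delta2ns(): device ticks -> ns, the reverse mapping */
        uint64_t back_ns = (ticks << shift) / mult;

        /* prints mult=4294967 ticks=999 back_ns=999000 (rounds down) */
        printf("mult=%llu ticks=%llu back_ns=%llu\n",
               (unsigned long long)mult,
               (unsigned long long)ticks,
               (unsigned long long)back_ns);
        return 0;
}
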
diff --git a/kernel/timer.c b/kernel/timer.c
index 6d843e100e75..7d522bdf8265 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -34,6 +34,7 @@
 #include <linux/cpu.h>
 #include <linux/syscalls.h>
 #include <linux/delay.h>
+#include <linux/clockchips.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -970,7 +971,8 @@ static int timekeeping_resume(struct sys_device *dev)
         write_sequnlock_irqrestore(&xtime_lock, flags);
 
         touch_softlockup_watchdog();
-        hrtimer_notify_resume();
+        /* Resume hrtimers */
+        clock_was_set();
 
         return 0;
 }
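
Finally, the notifier chain kept in kernel/time/clockevents.c can be consumed from other kernel code. A hypothetical listener might look like the sketch below; the example_* names and the switch body are invented, only clockevents_register_notifier() and the CLOCK_EVT_NOTIFY_* reasons come from this patch.

/*
 * Hypothetical listener on the clockevents notifier chain; the
 * example_* names are invented for illustration.
 */
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int example_clockevents_notify(struct notifier_block *nb,
                                      unsigned long reason, void *dev)
{
        switch (reason) {
        case CLOCK_EVT_NOTIFY_ADD:
                /* a new struct clock_event_device (*dev) was registered */
                break;
        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                /* a CPU went away; drop references to its per-cpu device */
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_clockevents_nb = {
        .notifier_call = example_clockevents_notify,
};

static int __init example_clockevents_listener_init(void)
{
        return clockevents_register_notifier(&example_clockevents_nb);
}
device_initcall(example_clockevents_listener_init);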