path: root/kernel/time
author    Thomas Gleixner <tglx@linutronix.de>    2007-02-16 04:28:02 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-02-16 11:13:59 -0500
commit    f8381cba04ba8173fd5a2b8e5cd8b3290ee13a98 (patch)
tree      ad8c9f91ce031a04c62ff75fcd3237fc666f1c2e /kernel/time
parent    906568c9c668ff994f4078932ec6ae1e3950d1af (diff)
[PATCH] tick-management: broadcast functionality
With Ingo Molnar <mingo@elte.hu>

Add broadcast functionality, so per cpu clock event devices can be registered
as dummy devices or switched from/to broadcast on demand. The broadcast
function distributes the events via the broadcast function of the clock event
device. This is primarily designed to replace the switch apic timer to / from
IPI in power states, where the apic stops.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/time')
-rw-r--r--   kernel/time/Makefile            5
-rw-r--r--   kernel/time/tick-broadcast.c  270
-rw-r--r--   kernel/time/tick-common.c      53
-rw-r--r--   kernel/time/tick-internal.h    65
4 files changed, 382 insertions(+), 11 deletions(-)
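As the commit message notes, the point of the broadcast layer is to stop switching the local apic timer to and from an IPI by hand around deep C-states. A minimal sketch of the intended caller is shown below; it assumes the clockevents_notify() entry point added by the clockevents core patch earlier in this series, and the idle wrapper itself is hypothetical, not part of this commit.

#include <linux/clockchips.h>
#include <linux/smp.h>

/*
 * Hypothetical deep-idle wrapper (not part of this patch): hand the
 * CPU's ticks to the broadcast device before the local apic timer
 * stops in C3, and take them back after wakeup.
 */
static void example_enter_deep_cstate(void)
{
	int cpu = smp_processor_id();	/* idle loop runs with preemption off */

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);

	/* ... enter C3 here; the broadcast device ticks on our behalf ... */

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_OFF, &cpu);
}

The &cpu argument matches tick_broadcast_on_off(reason, int *oncpu) below, which forwards the request to the affected CPU via smp_call_function_single() when the caller runs elsewhere.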
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 337daef72755..a941743c3ff8 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,5 @@
 obj-y += ntp.o clocksource.o jiffies.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
+obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
new file mode 100644
index 000000000000..0ee4968ff791
--- /dev/null
+++ b/kernel/time/tick-broadcast.c
@@ -0,0 +1,270 @@
/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

struct tick_device tick_broadcast_device;
static cpumask_t tick_broadcast_mask;
DEFINE_SPINLOCK(tick_broadcast_lock);

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc && bc->mode == CLOCK_EVT_MODE_SHUTDOWN)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if (tick_broadcast_device.evtdev ||
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(NULL, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpus_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check, if the device is disfunctional and a place holder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpu_set(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

/*
 * Broadcast the event to the cpus, which are set in the mask
 */
int tick_do_broadcast(cpumask_t mask)
{
	int ret = 0, cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
		ret = 1;
	}

	if (!cpus_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		cpu = first_cpu(mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->broadcast(mask);
		ret = 1;
	}
	return ret;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_t mask;

	spin_lock(&tick_broadcast_lock);

	cpus_and(mask, cpu_online_map, tick_broadcast_mask);
	tick_do_broadcast(mask);

	spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	dev->next_event.tv64 = KTIME_MAX;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(void *why)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device in broadcast mode forever or is it not
	 * affected by the powerstate ?
	 */
	if (!dev || !tick_device_is_functional(dev) ||
	    !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				clockevents_set_mode(dev,
						     CLOCK_EVT_MODE_SHUTDOWN);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
	}

	if (cpus_empty(tick_broadcast_mask))
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	else {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	int cpu = get_cpu();

	if (cpu == *oncpu)
		tick_do_broadcast_on_off(&reason);
	else
		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
					 &reason, 1, 1);
	put_cpu();
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpus_empty(tick_broadcast_mask))
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
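The placeholder path in tick_device_uses_broadcast() above expects a per-cpu device registered with neither periodic nor oneshot capability. A rough sketch of such a dummy registration follows; the device name, rating and stub are illustrative assumptions, only the CLOCK_EVT_FEAT_DUMMY flag and the clockevents registration call come from this patch series.

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Stub: the hardware behind a dummy device is never programmed. */
static void dummy_timer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
}

static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt);

/*
 * Register a non-functional per-cpu device.  CLOCK_EVT_FEAT_DUMMY makes
 * tick_device_is_functional() return 0, so tick_device_uses_broadcast()
 * puts this CPU into tick_broadcast_mask and the broadcast device calls
 * its event_handler from tick_do_broadcast().
 */
static void dummy_timer_setup(int cpu)
{
	struct clock_event_device *evt = &per_cpu(dummy_timer_evt, cpu);

	evt->name	= "dummy_timer";	/* illustrative */
	evt->features	= CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 100;			/* illustrative */
	evt->set_mode	= dummy_timer_set_mode;
	evt->cpumask	= cpumask_of_cpu(cpu);

	clockevents_register_device(evt);
}

When such a device ends up as the per-cpu tick device, the tick-common.c change below makes tick_setup_device() bail out through tick_device_uses_broadcast() before any periodic setup is attempted.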
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 46e4381c26ea..48167a6ae55c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -20,17 +20,19 @@
 #include <linux/sched.h>
 #include <linux/tick.h>
 
+#include "tick-internal.h"
+
 /*
  * Tick devices
  */
-static DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
+DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
 /*
  * Tick next event: keeps track of the tick time
  */
-static ktime_t tick_next_period;
-static ktime_t tick_period;
+ktime_t tick_next_period;
+ktime_t tick_period;
 static int tick_do_timer_cpu = -1;
-static DEFINE_SPINLOCK(tick_device_lock);
+DEFINE_SPINLOCK(tick_device_lock);
 
 /*
  * Periodic tick
@@ -78,9 +80,13 @@ void tick_handle_periodic(struct clock_event_device *dev)
 /*
  * Setup the device for a periodic tick
  */
-void tick_setup_periodic(struct clock_event_device *dev)
+void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 {
-	dev->event_handler = tick_handle_periodic;
+	tick_set_periodic_handler(dev, broadcast);
+
+	/* Broadcast setup ? */
+	if (!tick_device_is_functional(dev))
+		return;
 
 	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
 		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
@@ -145,6 +151,15 @@ static void tick_setup_device(struct tick_device *td,
 	if (!cpus_equal(newdev->cpumask, cpumask))
 		irq_set_affinity(newdev->irq, cpumask);
 
+	/*
+	 * When global broadcasting is active, check if the current
+	 * device is registered as a placeholder for broadcast mode.
+	 * This allows us to handle this x86 misfeature in a generic
+	 * way.
+	 */
+	if (tick_device_uses_broadcast(newdev, cpu))
+		return;
+
 	if (td->mode == TICKDEV_MODE_PERIODIC)
 		tick_setup_periodic(newdev, 0);
 }
@@ -197,19 +212,33 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 		 * Check the rating
 		 */
 		if (curdev->rating >= newdev->rating)
-			goto out;
+			goto out_bc;
 	}
 
 	/*
 	 * Replace the eventually existing device by the new
-	 * device.
+	 * device. If the current device is the broadcast device, do
+	 * not give it back to the clockevents layer !
 	 */
+	if (tick_is_broadcast_device(curdev)) {
+		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
+		curdev = NULL;
+	}
 	clockevents_exchange_device(curdev, newdev);
 	tick_setup_device(td, newdev, cpu, cpumask);
-	ret = NOTIFY_STOP;
 
+	spin_unlock_irqrestore(&tick_device_lock, flags);
+	return NOTIFY_STOP;
+
+out_bc:
+	/*
+	 * Can the new device be used as a broadcast device ?
+	 */
+	if (tick_check_broadcast_device(newdev))
+		ret = NOTIFY_STOP;
 out:
 	spin_unlock_irqrestore(&tick_device_lock, flags);
+
 	return ret;
 }
 
@@ -251,7 +280,13 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
 	case CLOCK_EVT_NOTIFY_ADD:
 		return tick_check_new_device(dev);
 
+	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
+	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
+		tick_broadcast_on_off(reason, dev);
+		break;
+
 	case CLOCK_EVT_NOTIFY_CPU_DEAD:
+		tick_shutdown_broadcast(dev);
 		tick_shutdown(dev);
 		break;
 
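Which side of the new out_bc path a device lands on is decided purely by its feature flags and rating. The initializers below illustrate that split; the names and stub callbacks are made up for the sketch, only the CLOCK_EVT_FEAT_* semantics come from this patch series.

#include <linux/clockchips.h>
#include <linux/cpumask.h>

/* Hypothetical hardware hooks - stand-ins for real programming code. */
static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	return 0;	/* program the hardware comparator here */
}

static void example_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *evt)
{
	/* switch the hardware between periodic/oneshot/shutdown here */
}

static void example_send_timer_ipi(cpumask_t mask)
{
	/* deliver a timer IPI to every CPU in @mask here */
}

/*
 * A lapic-like per-cpu timer that stops in C3 advertises C3STOP, so
 * tick_check_broadcast_device() refuses it and tick_do_broadcast_on_off()
 * parks it while its CPU sleeps in a deep C-state.
 */
static struct clock_event_device example_local_timer = {
	.name		= "example-local",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_C3STOP,
	.rating		= 100,
	.set_next_event	= example_set_next_event,
	.set_mode	= example_set_mode,
	.broadcast	= example_send_timer_ipi,
};

/*
 * A PIT-like global timer keeps running in C3 and carries no C3STOP flag,
 * so tick_check_new_device() can hand it to tick_check_broadcast_device()
 * once a higher-rated per-cpu device has taken over the local tick.
 */
static struct clock_event_device example_global_timer = {
	.name		= "example-global",
	.features	= CLOCK_EVT_FEAT_PERIODIC,
	.rating		= 50,
	.set_next_event	= example_set_next_event,
	.set_mode	= example_set_mode,
};

The flag-free global timer is then what tick_broadcast_start_periodic() programs whenever at least one CPU sits in tick_broadcast_mask.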
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
new file mode 100644
index 000000000000..9272f446b21c
--- /dev/null
+++ b/kernel/time/tick-internal.h
@@ -0,0 +1,65 @@
/*
 * tick internal variable and functions used by low/high res code
 */
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern spinlock_t tick_device_lock;
extern ktime_t tick_next_period;
extern ktime_t tick_period;

extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
extern void tick_handle_periodic(struct clock_event_device *dev);

/*
 * Broadcasting support
 */
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern int tick_do_broadcast(cpumask_t mask);
extern struct tick_device tick_broadcast_device;
extern spinlock_t tick_broadcast_lock;

extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
extern int tick_check_broadcast_device(struct clock_event_device *dev);
extern int tick_is_broadcast_device(struct clock_event_device *dev);
extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
extern void tick_shutdown_broadcast(unsigned int *cpup);

extern void
tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);

#else /* !BROADCAST */

static inline int tick_check_broadcast_device(struct clock_event_device *dev)
{
	return 0;
}

static inline int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return 0;
}
static inline int tick_device_uses_broadcast(struct clock_event_device *dev,
					     int cpu)
{
	return 0;
}
static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { }
static inline void tick_shutdown_broadcast(unsigned int *cpup) { }

/*
 * Set the periodic handler in non broadcast mode
 */
static inline void tick_set_periodic_handler(struct clock_event_device *dev,
					     int broadcast)
{
	dev->event_handler = tick_handle_periodic;
}
#endif /* !BROADCAST */

/*
 * Check, if the device is functional or a dummy for broadcast
 */
static inline int tick_device_is_functional(struct clock_event_device *dev)
{
	return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
}