Diffstat (limited to 'kernel')
-rw-r--r--	kernel/time/tick-common.c	22
1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index dbf4e18d5101..170a4bdfa99e 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,7 +33,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
 ktime_t tick_next_period;
 ktime_t tick_period;
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
-static DEFINE_RAW_SPINLOCK(tick_device_lock);
 
 /*
  * Debugging: see timer_list.c
@@ -206,16 +205,14 @@ static void tick_setup_device(struct tick_device *td,
 }
 
 /*
- * Check, if the new registered device should be used.
+ * Check, if the new registered device should be used. Called with
+ * clockevents_lock held and interrupts disabled.
  */
 void tick_check_new_device(struct clock_event_device *newdev)
 {
 	struct clock_event_device *curdev;
 	struct tick_device *td;
 	int cpu;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 
 	cpu = smp_processor_id();
 	if (!cpumask_test_cpu(cpu, newdev->cpumask))
@@ -273,8 +270,6 @@ void tick_check_new_device(struct clock_event_device *newdev)
 	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
 	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
 		tick_oneshot_notify();
-
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 	return;
 
 out_bc:
@@ -282,7 +277,6 @@ out_bc:
 	 * Can the new device be used as a broadcast device ?
 	 */
 	tick_install_broadcast_device(newdev);
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 /*
@@ -311,9 +305,7 @@ static void tick_shutdown(unsigned int *cpup)
 {
 	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
 	struct clock_event_device *dev = td->evtdev;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 	td->mode = TICKDEV_MODE_PERIODIC;
 	if (dev) {
 		/*
@@ -325,26 +317,20 @@ static void tick_shutdown(unsigned int *cpup)
 		dev->event_handler = clockevents_handle_noop;
 		td->evtdev = NULL;
 	}
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 static void tick_suspend(void)
 {
 	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 	clockevents_shutdown(td->evtdev);
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 static void tick_resume(void)
 {
 	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-	unsigned long flags;
 	int broadcast = tick_resume_broadcast();
 
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
 
 	if (!broadcast) {
@@ -353,9 +339,11 @@ static void tick_resume(void)
 		else
 			tick_resume_oneshot();
 	}
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
+/*
+ * Called with clockevents_lock held and interrupts disabled
+ */
 void tick_notify(unsigned long reason, void *dev)
 {
 	switch (reason) {
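
Context for the lock removal: the comments added by this patch state that these paths now run with clockevents_lock held and interrupts disabled, so the per-call tick_device_lock acquisitions are redundant. The sketch below is a simplified, illustrative approximation of that caller-side serialization (loosely modeled on clockevents_register_device() in kernel/time/clockevents.c); it is not the actual kernel source, and the exact body of the registration path is an assumption here.

/*
 * Illustrative sketch only: the registration path already serializes
 * callers of tick_check_new_device() by taking clockevents_lock with
 * interrupts disabled, which is what makes the tick_device_lock
 * removed above unnecessary.
 */
static DEFINE_RAW_SPINLOCK(clockevents_lock);

void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	/* Runs with clockevents_lock held and interrupts disabled. */
	tick_check_new_device(dev);

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}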