Diffstat (limited to 'kernel/time/tick-broadcast.c')
-rw-r--r--  kernel/time/tick-broadcast.c  191
1 files changed, 190 insertions, 1 deletions
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 0ee4968ff791..8314ecb32d33 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -29,7 +29,7 @@
 
 struct tick_device tick_broadcast_device;
 static cpumask_t tick_broadcast_mask;
-DEFINE_SPINLOCK(tick_broadcast_lock);
+static DEFINE_SPINLOCK(tick_broadcast_lock);
 
 /*
  * Start the device in periodic mode
@@ -215,6 +215,8 @@ static void tick_do_broadcast_on_off(void *why)
 	else {
 		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
 			tick_broadcast_start_periodic(bc);
+		else
+			tick_broadcast_setup_oneshot(bc);
 	}
 out:
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -268,3 +270,190 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
+
+#ifdef CONFIG_TICK_ONESHOT
+
+static cpumask_t tick_broadcast_oneshot_mask;
+
+static int tick_broadcast_set_event(ktime_t expires, int force)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+	ktime_t now = ktime_get();
+	int res;
+
+	for(;;) {
+		res = clockevents_program_event(bc, expires, now);
+		if (!res || !force)
+			return res;
+		now = ktime_get();
+		expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
+	}
+}
+
+/*
+ * Reprogram the broadcast device:
+ *
+ * Called with tick_broadcast_lock held and interrupts disabled.
+ */
+static int tick_broadcast_reprogram(void)
+{
+	ktime_t expires = { .tv64 = KTIME_MAX };
+	struct tick_device *td;
+	int cpu;
+
+	/*
+	 * Find the event which expires next:
+	 */
+	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
+	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
+		td = &per_cpu(tick_cpu_device, cpu);
+		if (td->evtdev->next_event.tv64 < expires.tv64)
+			expires = td->evtdev->next_event;
+	}
+
+	if (expires.tv64 == KTIME_MAX)
+		return 0;
+
+	return tick_broadcast_set_event(expires, 0);
+}
+
+/*
+ * Handle oneshot mode broadcasting
+ */
+static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
+{
+	struct tick_device *td;
+	cpumask_t mask;
+	ktime_t now;
+	int cpu;
+
+	spin_lock(&tick_broadcast_lock);
+again:
+	dev->next_event.tv64 = KTIME_MAX;
+	mask = CPU_MASK_NONE;
+	now = ktime_get();
+	/* Find all expired events */
+	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
+	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
+		td = &per_cpu(tick_cpu_device, cpu);
+		if (td->evtdev->next_event.tv64 <= now.tv64)
+			cpu_set(cpu, mask);
+	}
+
+	/*
+	 * Wakeup the cpus which have an expired event. The broadcast
+	 * device is reprogrammed in the return from idle code.
+	 */
+	if (!tick_do_broadcast(mask)) {
+		/*
+		 * The global event did not expire any CPU local
+		 * events. This happens in dyntick mode, as the
+		 * maximum PIT delta is quite small.
+		 */
+		if (tick_broadcast_reprogram())
+			goto again;
+	}
+	spin_unlock(&tick_broadcast_lock);
+}
+
+/*
+ * Powerstate information: The system enters/leaves a state, where
+ * affected devices might stop
+ */
+void tick_broadcast_oneshot_control(unsigned long reason)
+{
+	struct clock_event_device *bc, *dev;
+	struct tick_device *td;
+	unsigned long flags;
+	int cpu;
+
+	spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+	/*
+	 * Periodic mode does not care about the enter/exit of power
+	 * states
+	 */
+	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+		goto out;
+
+	bc = tick_broadcast_device.evtdev;
+	cpu = smp_processor_id();
+	td = &per_cpu(tick_cpu_device, cpu);
+	dev = td->evtdev;
+
+	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+		goto out;
+
+	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
+		if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+			cpu_set(cpu, tick_broadcast_oneshot_mask);
+			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+			if (dev->next_event.tv64 < bc->next_event.tv64)
+				tick_broadcast_set_event(dev->next_event, 1);
+		}
+	} else {
+		if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+			cpu_clear(cpu, tick_broadcast_oneshot_mask);
+			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+			if (dev->next_event.tv64 != KTIME_MAX)
+				tick_program_event(dev->next_event, 1);
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+}
+
+/**
+ * tick_broadcast_setup_oneshot - setup the broadcast device for oneshot
+ */
+void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
+{
+	if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
+		bc->event_handler = tick_handle_oneshot_broadcast;
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+		bc->next_event.tv64 = KTIME_MAX;
+	}
+}
+
+/*
+ * Select oneshot operating mode for the broadcast device
+ */
+void tick_broadcast_switch_to_oneshot(void)
+{
+	struct clock_event_device *bc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
+	bc = tick_broadcast_device.evtdev;
+	if (bc)
+		tick_broadcast_setup_oneshot(bc);
+	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+}
+
+
+/*
+ * Remove a dead CPU from broadcasting
+ */
+void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
+{
+	struct clock_event_device *bc;
+	unsigned long flags;
+	unsigned int cpu = *cpup;
+
+	spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+	bc = tick_broadcast_device.evtdev;
+	cpu_clear(cpu, tick_broadcast_oneshot_mask);
+
+	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
+		if (bc && cpus_empty(tick_broadcast_oneshot_mask))
+			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+	}
+
+	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+}
+
+#endif
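
The patch above only adds the oneshot broadcast machinery; it is driven from the idle path via the clockevents notifier. Below is a minimal illustrative sketch (not part of this diff) of how a CPU whose local tick device stops in deep C-states would hand its next wakeup to the broadcast device before idling and take it back afterwards, assuming the clockevents_notify() plumbing elsewhere in this series routes CLOCK_EVT_NOTIFY_BROADCAST_ENTER/EXIT to tick_broadcast_oneshot_control(). The function names example_idle_with_broadcast() and example_enter_deep_c_state() are hypothetical placeholders.

/*
 * Illustrative sketch only -- not part of the patch above.
 * Assumes clockevents_notify() forwards the BROADCAST_ENTER/EXIT
 * reasons to tick_broadcast_oneshot_control(); names are made up.
 */
#include <linux/clockchips.h>
#include <linux/smp.h>

/* Placeholder for the real architecture-specific low-power entry. */
static void example_enter_deep_c_state(void)
{
	/* the real code would execute the MWAIT/HLT sequence here */
}

static void example_idle_with_broadcast(void)
{
	int cpu = smp_processor_id();

	/*
	 * The local tick device may stop in this power state (C3STOP),
	 * so delegate the wakeup duty to the broadcast device first.
	 */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	example_enter_deep_c_state();

	/* Back from idle: reclaim and reprogram the local tick device. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}

On ENTER, tick_broadcast_oneshot_control() adds the CPU to tick_broadcast_oneshot_mask, shuts down its local device and, if the CPU's next event is earlier than the broadcast device's, reprograms the broadcast device; on EXIT it removes the CPU from the mask and reprograms the local device.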