Diffstat (limited to 'kernel/time/clockevents.c')
 kernel/time/clockevents.c | 229 ++++++++++++++++++++++++++++++---------------
 1 file changed, 147 insertions(+), 82 deletions(-)
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 55449909f114..25d942d1da27 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -94,25 +94,76 @@ u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
 }
 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
 
+static int __clockevents_set_state(struct clock_event_device *dev,
+				   enum clock_event_state state)
+{
+	/* Transition with legacy set_mode() callback */
+	if (dev->set_mode) {
+		/* Legacy callback doesn't support new modes */
+		if (state > CLOCK_EVT_STATE_ONESHOT)
+			return -ENOSYS;
+		/*
+		 * 'clock_event_state' and 'clock_event_mode' have 1-to-1
+		 * mapping until *_ONESHOT, and so a simple cast will work.
+		 */
+		dev->set_mode((enum clock_event_mode)state, dev);
+		dev->mode = (enum clock_event_mode)state;
+		return 0;
+	}
+
+	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
+		return 0;
+
+	/* Transition with new state-specific callbacks */
+	switch (state) {
+	case CLOCK_EVT_STATE_DETACHED:
+		/*
+		 * This is an internal state, which is guaranteed to go from
+		 * SHUTDOWN to DETACHED. No driver interaction required.
+		 */
+		return 0;
+
+	case CLOCK_EVT_STATE_SHUTDOWN:
+		return dev->set_state_shutdown(dev);
+
+	case CLOCK_EVT_STATE_PERIODIC:
+		/* Core internal bug */
+		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
+			return -ENOSYS;
+		return dev->set_state_periodic(dev);
+
+	case CLOCK_EVT_STATE_ONESHOT:
+		/* Core internal bug */
+		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+			return -ENOSYS;
+		return dev->set_state_oneshot(dev);
+
+	default:
+		return -ENOSYS;
+	}
+}
+
 /**
- * clockevents_set_mode - set the operating mode of a clock event device
+ * clockevents_set_state - set the operating state of a clock event device
  * @dev: device to modify
- * @mode: new mode
+ * @state: new state
  *
  * Must be called with interrupts disabled !
  */
-void clockevents_set_mode(struct clock_event_device *dev,
-			  enum clock_event_mode mode)
+void clockevents_set_state(struct clock_event_device *dev,
+			   enum clock_event_state state)
 {
-	if (dev->mode != mode) {
-		dev->set_mode(mode, dev);
-		dev->mode = mode;
+	if (dev->state != state) {
+		if (__clockevents_set_state(dev, state))
+			return;
+
+		dev->state = state;
 
 		/*
 		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
 		 * on it, so fix it up and emit a warning:
 		 */
-		if (mode == CLOCK_EVT_MODE_ONESHOT) {
+		if (state == CLOCK_EVT_STATE_ONESHOT) {
 			if (unlikely(!dev->mult)) {
 				dev->mult = 1;
 				WARN_ON(1);
@@ -127,10 +178,28 @@ void clockevents_set_mode(struct clock_event_device *dev,
  */
 void clockevents_shutdown(struct clock_event_device *dev)
 {
-	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+	clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 	dev->next_event.tv64 = KTIME_MAX;
 }
 
+/**
+ * clockevents_tick_resume - Resume the tick device before using it again
+ * @dev: device to resume
+ */
+int clockevents_tick_resume(struct clock_event_device *dev)
+{
+	int ret = 0;
+
+	if (dev->set_mode) {
+		dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
+		dev->mode = CLOCK_EVT_MODE_RESUME;
+	} else if (dev->tick_resume) {
+		ret = dev->tick_resume(dev);
+	}
+
+	return ret;
+}
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
 
 /* Limit min_delta to a jiffie */
@@ -183,7 +252,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
 		delta = dev->min_delta_ns;
 		dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
+		if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
 			return 0;
 
 		dev->retries++;
@@ -220,7 +289,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
 	delta = dev->min_delta_ns;
 	dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
+	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
 		return 0;
 
 	dev->retries++;
@@ -252,7 +321,7 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 
 	dev->next_event = expires;
 
-	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
+	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
 		return 0;
 
 	/* Shortcut for clockevent devices that can deal with ktime. */
@@ -297,7 +366,7 @@ static int clockevents_replace(struct clock_event_device *ced)
 	struct clock_event_device *dev, *newdev = NULL;
 
 	list_for_each_entry(dev, &clockevent_devices, list) {
-		if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
+		if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED)
 			continue;
 
 		if (!tick_check_replacement(newdev, dev))
@@ -323,7 +392,7 @@ static int clockevents_replace(struct clock_event_device *ced)
 static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
 {
 	/* Fast track. Device is unused */
-	if (ced->mode == CLOCK_EVT_MODE_UNUSED) {
+	if (ced->state == CLOCK_EVT_STATE_DETACHED) {
 		list_del_init(&ced->list);
 		return 0;
 	}
@@ -373,6 +442,37 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
 }
 EXPORT_SYMBOL_GPL(clockevents_unbind);
 
+/* Sanity check of state transition callbacks */
+static int clockevents_sanity_check(struct clock_event_device *dev)
+{
+	/* Legacy set_mode() callback */
+	if (dev->set_mode) {
+		/* We shouldn't be supporting new modes now */
+		WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
+			dev->set_state_shutdown || dev->tick_resume);
+
+		BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+		return 0;
+	}
+
+	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
+		return 0;
+
+	/* New state-specific callbacks */
+	if (!dev->set_state_shutdown)
+		return -EINVAL;
+
+	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+	    !dev->set_state_periodic)
+		return -EINVAL;
+
+	if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
+	    !dev->set_state_oneshot)
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
  * clockevents_register_device - register a clock event device
  * @dev: device to register
@@ -381,7 +481,11 @@ void clockevents_register_device(struct clock_event_device *dev)
 {
 	unsigned long flags;
 
-	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+	BUG_ON(clockevents_sanity_check(dev));
+
+	/* Initialize state to DETACHED */
+	dev->state = CLOCK_EVT_STATE_DETACHED;
+
 	if (!dev->cpumask) {
 		WARN_ON(num_possible_cpus() > 1);
 		dev->cpumask = cpumask_of(smp_processor_id());
@@ -445,11 +549,11 @@ int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 {
 	clockevents_config(dev, freq);
 
-	if (dev->mode == CLOCK_EVT_MODE_ONESHOT)
+	if (dev->state == CLOCK_EVT_STATE_ONESHOT)
 		return clockevents_program_event(dev, dev->next_event, false);
 
-	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
-		dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev);
+	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
+		return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
 
 	return 0;
 }
@@ -491,30 +595,27 @@ void clockevents_handle_noop(struct clock_event_device *dev)
  * @old:	device to release (can be NULL)
  * @new:	device to request (can be NULL)
  *
- * Called from the notifier chain. clockevents_lock is held already
+ * Called from various tick functions with clockevents_lock held and
+ * interrupts disabled.
  */
 void clockevents_exchange_device(struct clock_event_device *old,
 				 struct clock_event_device *new)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	/*
 	 * Caller releases a clock event device. We queue it into the
 	 * released list and do a notify add later.
 	 */
 	if (old) {
 		module_put(old->owner);
-		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
+		clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED);
 		list_del(&old->list);
 		list_add(&old->list, &clockevents_released);
 	}
 
 	if (new) {
-		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
+		BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED);
 		clockevents_shutdown(new);
 	}
-	local_irq_restore(flags);
 }
 
 /**
@@ -541,74 +642,40 @@ void clockevents_resume(void)
 		dev->resume(dev);
 }
 
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
+#ifdef CONFIG_HOTPLUG_CPU
 /**
- * clockevents_notify - notification about relevant events
- * Returns 0 on success, any other value on error
+ * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
  */
-int clockevents_notify(unsigned long reason, void *arg)
+void tick_cleanup_dead_cpu(int cpu)
 {
 	struct clock_event_device *dev, *tmp;
 	unsigned long flags;
-	int cpu, ret = 0;
 
 	raw_spin_lock_irqsave(&clockevents_lock, flags);
 
-	switch (reason) {
-	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
-	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
-		tick_broadcast_on_off(reason, arg);
-		break;
-
-	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
-	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
-		ret = tick_broadcast_oneshot_control(reason);
-		break;
-
-	case CLOCK_EVT_NOTIFY_CPU_DYING:
-		tick_handover_do_timer(arg);
-		break;
-
-	case CLOCK_EVT_NOTIFY_SUSPEND:
-		tick_suspend();
-		tick_suspend_broadcast();
-		break;
-
-	case CLOCK_EVT_NOTIFY_RESUME:
-		tick_resume();
-		break;
-
-	case CLOCK_EVT_NOTIFY_CPU_DEAD:
-		tick_shutdown_broadcast_oneshot(arg);
-		tick_shutdown_broadcast(arg);
-		tick_shutdown(arg);
-		/*
-		 * Unregister the clock event devices which were
-		 * released from the users in the notify chain.
-		 */
-		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
-			list_del(&dev->list);
-		/*
-		 * Now check whether the CPU has left unused per cpu devices
-		 */
-		cpu = *((int *)arg);
-		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
-			if (cpumask_test_cpu(cpu, dev->cpumask) &&
-			    cpumask_weight(dev->cpumask) == 1 &&
-			    !tick_is_broadcast_device(dev)) {
-				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
-				list_del(&dev->list);
-			}
-		}
-		break;
-	default:
-		break;
-	}
+	tick_shutdown_broadcast_oneshot(cpu);
+	tick_shutdown_broadcast(cpu);
+	tick_shutdown(cpu);
+	/*
+	 * Unregister the clock event devices which were
+	 * released from the users in the notify chain.
+	 */
+	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
+		list_del(&dev->list);
+	/*
+	 * Now check whether the CPU has left unused per cpu devices
+	 */
+	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
+		if (cpumask_test_cpu(cpu, dev->cpumask) &&
+		    cpumask_weight(dev->cpumask) == 1 &&
+		    !tick_is_broadcast_device(dev)) {
+			BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
+			list_del(&dev->list);
+		}
+	}
 	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
-	return ret;
 }
-EXPORT_SYMBOL_GPL(clockevents_notify);
+#endif
 
 #ifdef CONFIG_SYSFS
 struct bus_type clockevents_subsys = {
@@ -727,5 +794,3 @@ static int __init clockevents_init_sysfs(void)
 }
 device_initcall(clockevents_init_sysfs);
 #endif /* SYSFS */
-
-#endif /* GENERIC_CLOCK_EVENTS */
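
Note on the cast in __clockevents_set_state() above: it relies on the first
four values of the two enums lining up. As a sketch (paraphrasing the
include/linux/clockchips.h definitions of this era; the header itself is
authoritative):

enum clock_event_mode {
	CLOCK_EVT_MODE_UNUSED = 0,
	CLOCK_EVT_MODE_SHUTDOWN,
	CLOCK_EVT_MODE_PERIODIC,
	CLOCK_EVT_MODE_ONESHOT,
	CLOCK_EVT_MODE_RESUME,		/* no state counterpart */
};

enum clock_event_state {
	CLOCK_EVT_STATE_DETACHED = 0,	/* pairs with CLOCK_EVT_MODE_UNUSED */
	CLOCK_EVT_STATE_SHUTDOWN,
	CLOCK_EVT_STATE_PERIODIC,
	CLOCK_EVT_STATE_ONESHOT,
};

DETACHED..ONESHOT map 1-to-1 onto UNUSED..ONESHOT, so the
(enum clock_event_mode)state cast is safe for states up to
CLOCK_EVT_STATE_ONESHOT; anything beyond that returns -ENOSYS for legacy
set_mode() drivers, and resume is handled separately through
clockevents_tick_resume().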
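
For driver authors, clockevents_sanity_check() means a device must commit to
one model: either the single legacy set_mode() callback, or the per-state
callbacks introduced here. A minimal sketch of the new-style shape (all foo_*
names are hypothetical and the hardware accesses are stubbed out):

#include <linux/clockchips.h>

static void foo_hw_stop(void)     { /* hypothetical: stop the counter */ }
static void foo_hw_periodic(void) { /* hypothetical: auto-reload mode */ }
static void foo_hw_oneshot(void)  { /* hypothetical: one-shot mode */ }

static int foo_shutdown(struct clock_event_device *evt)
{
	foo_hw_stop();
	return 0;
}

static int foo_set_periodic(struct clock_event_device *evt)
{
	foo_hw_periodic();
	return 0;
}

static int foo_set_oneshot(struct clock_event_device *evt)
{
	foo_hw_oneshot();
	return 0;
}

static int foo_set_next_event(unsigned long delta,
			      struct clock_event_device *evt)
{
	/* hypothetical: fire a one-shot interrupt 'delta' cycles from now */
	return 0;
}

static struct clock_event_device foo_clockevent = {
	.name			= "foo-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= foo_shutdown,		/* required without set_mode */
	.set_state_periodic	= foo_set_periodic,	/* required by FEAT_PERIODIC */
	.set_state_oneshot	= foo_set_oneshot,	/* required by FEAT_ONESHOT */
	.set_next_event		= foo_set_next_event,
	/* .tick_resume is optional; the sanity check does not demand it */
};

Mixing set_mode() with any set_state_*() or tick_resume callback trips the
WARN_ON() in the sanity check, and a missing required callback makes
clockevents_sanity_check() return -EINVAL, which the BUG_ON() in
clockevents_register_device() turns fatal.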