author     Viresh Kumar <viresh.kumar@linaro.org>    2015-05-21 04:03:46 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2015-06-02 08:40:47 -0400
commit     472c4a9437d3c6a0b1e59df7c5aa14075946aa70
tree       6398aeeb0a87cd8c96fb8ba2247efa139f412543 /kernel/time
parent     3434d23b694e5cb6e44e966914563406c31c4053
clockevents: Use helpers to check the state of a clockevent device
Use accessor functions to check the state of clockevent devices in core code.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: linaro-kernel@lists.linaro.org
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/fa2b9869fd17f210eaa156ec2b594efd0230b6c7.1432192527.git.viresh.kumar@linaro.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
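For context, the accessors used throughout this patch follow the pattern sketched below. This is an illustrative sketch only, not the authoritative definitions: each helper is assumed to wrap the open-coded dev->state comparison that the hunks replace (the real helpers live in include/linux/clockchips.h).

/*
 * Illustrative sketch of the clockevent state accessors. Each helper is
 * assumed to be a trivial wrapper around the dev->state comparison that
 * this patch removes from the call sites.
 */
static inline bool clockevent_state_detached(struct clock_event_device *dev)
{
        return dev->state == CLOCK_EVT_STATE_DETACHED;
}

static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
{
        return dev->state == CLOCK_EVT_STATE_SHUTDOWN;
}

static inline bool clockevent_state_periodic(struct clock_event_device *dev)
{
        return dev->state == CLOCK_EVT_STATE_PERIODIC;
}

static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
{
        return dev->state == CLOCK_EVT_STATE_ONESHOT;
}

static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
{
        return dev->state == CLOCK_EVT_STATE_ONESHOT_STOPPED;
}

With helpers of this shape, a check such as dev->state == CLOCK_EVT_STATE_SHUTDOWN becomes clockevent_state_shutdown(dev), which is exactly the transformation every hunk below performs.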
Diffstat (limited to 'kernel/time')
-rw-r--r--   kernel/time/clockevents.c     | 24
-rw-r--r--   kernel/time/tick-broadcast.c  |  6
-rw-r--r--   kernel/time/tick-common.c     |  2
-rw-r--r--   kernel/time/tick-oneshot.c    |  2
4 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 2a5c369e50ab..e568ec8c320b 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -136,7 +136,7 @@ static int __clockevents_set_state(struct clock_event_device *dev,
 
         case CLOCK_EVT_STATE_ONESHOT_STOPPED:
                 /* Core internal bug */
-                if (WARN_ONCE(dev->state != CLOCK_EVT_STATE_ONESHOT,
+                if (WARN_ONCE(!clockevent_state_oneshot(dev),
                               "Current state: %d\n", dev->state))
                         return -EINVAL;
 
@@ -170,7 +170,7 @@ void clockevents_set_state(struct clock_event_device *dev,
                  * A nsec2cyc multiplicator of 0 is invalid and we'd crash
                  * on it, so fix it up and emit a warning:
                  */
-                if (state == CLOCK_EVT_STATE_ONESHOT) {
+                if (clockevent_state_oneshot(dev)) {
                         if (unlikely(!dev->mult)) {
                                 dev->mult = 1;
                                 WARN_ON(1);
@@ -259,7 +259,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
                 delta = dev->min_delta_ns;
                 dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-                if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+                if (clockevent_state_shutdown(dev))
                         return 0;
 
                 dev->retries++;
@@ -296,7 +296,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
         delta = dev->min_delta_ns;
         dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-        if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+        if (clockevent_state_shutdown(dev))
                 return 0;
 
         dev->retries++;
@@ -328,11 +328,11 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 
         dev->next_event = expires;
 
-        if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+        if (clockevent_state_shutdown(dev))
                 return 0;
 
         /* We must be in ONESHOT state here */
-        WARN_ONCE(dev->state != CLOCK_EVT_STATE_ONESHOT, "Current state: %d\n",
+        WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
                   dev->state);
 
         /* Shortcut for clockevent devices that can deal with ktime. */
@@ -377,7 +377,7 @@ static int clockevents_replace(struct clock_event_device *ced)
         struct clock_event_device *dev, *newdev = NULL;
 
         list_for_each_entry(dev, &clockevent_devices, list) {
-                if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED)
+                if (dev == ced || !clockevent_state_detached(dev))
                         continue;
 
                 if (!tick_check_replacement(newdev, dev))
@@ -403,7 +403,7 @@ static int clockevents_replace(struct clock_event_device *ced)
 static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
 {
         /* Fast track. Device is unused */
-        if (ced->state == CLOCK_EVT_STATE_DETACHED) {
+        if (clockevent_state_detached(ced)) {
                 list_del_init(&ced->list);
                 return 0;
         }
@@ -561,10 +561,10 @@ int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 {
         clockevents_config(dev, freq);
 
-        if (dev->state == CLOCK_EVT_STATE_ONESHOT)
+        if (clockevent_state_oneshot(dev))
                 return clockevents_program_event(dev, dev->next_event, false);
 
-        if (dev->state == CLOCK_EVT_STATE_PERIODIC)
+        if (clockevent_state_periodic(dev))
                 return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
 
         return 0;
@@ -625,7 +625,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
         }
 
         if (new) {
-                BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED);
+                BUG_ON(!clockevent_state_detached(new));
                 clockevents_shutdown(new);
         }
 }
@@ -681,7 +681,7 @@ void tick_cleanup_dead_cpu(int cpu)
                 if (cpumask_test_cpu(cpu, dev->cpumask) &&
                     cpumask_weight(dev->cpumask) == 1 &&
                     !tick_is_broadcast_device(dev)) {
-                        BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
+                        BUG_ON(!clockevent_state_detached(dev));
                         list_del(&dev->list);
                 }
         }
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 12fcc55d607a..132f819fdcdf 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -303,7 +303,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
         raw_spin_lock(&tick_broadcast_lock);
         bc_local = tick_do_periodic_broadcast();
 
-        if (dev->state == CLOCK_EVT_STATE_ONESHOT) {
+        if (clockevent_state_oneshot(dev)) {
                 ktime_t next = ktime_add(dev->next_event, tick_period);
 
                 clockevents_program_event(dev, next, true);
@@ -528,7 +528,7 @@ static void tick_broadcast_set_affinity(struct clock_event_device *bc,
 static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
                                      ktime_t expires)
 {
-        if (bc->state != CLOCK_EVT_STATE_ONESHOT)
+        if (!clockevent_state_oneshot(bc))
                 clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
 
         clockevents_program_event(bc, expires, 1);
@@ -831,7 +831,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 
         /* Set it up only once ! */
         if (bc->event_handler != tick_handle_oneshot_broadcast) {
-                int was_periodic = bc->state == CLOCK_EVT_STATE_PERIODIC;
+                int was_periodic = clockevent_state_periodic(bc);
 
                 bc->event_handler = tick_handle_oneshot_broadcast;
 
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index ea5f9eae8f74..cf881c62c3c5 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -112,7 +112,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
                 return;
 #endif
 
-        if (dev->state != CLOCK_EVT_STATE_ONESHOT)
+        if (!clockevent_state_oneshot(dev))
                 return;
         for (;;) {
                 /*
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index f8de75715c2f..3f9715bec291 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -36,7 +36,7 @@ int tick_program_event(ktime_t expires, int force)
                 return 0;
         }
 
-        if (unlikely(dev->state == CLOCK_EVT_STATE_ONESHOT_STOPPED)) {
+        if (unlikely(clockevent_state_oneshot_stopped(dev))) {
                 /*
                  * We need the clock event again, configure it in ONESHOT mode
                  * before using it.