path: root/kernel/hrtimer.c
author	Thomas Gleixner <tglx@linutronix.de>	2011-05-20 07:05:15 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2011-05-23 07:59:54 -0400
commit	ab8177bc53e8ae3a3ba6d200ce2c2dae263f7ee5 (patch)
tree	c8b370496497b4f96d6a17da906bdd9314e9a090 /kernel/hrtimer.c
parent	f24444b01bf6c51c300fd3ffc73423383d747882 (diff)
hrtimers: Avoid touching inactive timer bases
Instead of iterating over all possible timer bases, mark the active bases in the
cpu base so that inactive bases can be skipped.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Peter Zijlstra <peterz@infradead.org>
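A minimal standalone sketch of the idea, outside the kernel tree (the struct
layout, helper names and the nr_queued counter below are simplified stand-ins
for the real hrtimer_cpu_base/timerqueue machinery, not the kernel's actual
types): each clock base owns one bit in a per-CPU active_bases mask, the bit
is set when a timer is queued, cleared when the last timer on that base is
removed, and the expiry loop only visits bases whose bit is set.

#include <stdio.h>

enum { BASE_REALTIME, BASE_MONOTONIC, BASE_BOOTTIME, MAX_BASES };

struct clock_base {
	int nr_queued;			/* stand-in for the base's timerqueue */
};

struct cpu_base {
	unsigned int active_bases;	/* bit i set => clock_base[i] has timers */
	struct clock_base clock_base[MAX_BASES];
};

/* Queue a timer: mark the base active, as enqueue_hrtimer() does. */
static void enqueue_timer(struct cpu_base *cb, int idx)
{
	cb->clock_base[idx].nr_queued++;
	cb->active_bases |= 1U << idx;
}

/*
 * Remove a timer: clear the bit only when the base runs empty,
 * mirroring the timerqueue_getnext() check in __remove_hrtimer().
 */
static void remove_timer(struct cpu_base *cb, int idx)
{
	if (cb->clock_base[idx].nr_queued && --cb->clock_base[idx].nr_queued == 0)
		cb->active_bases &= ~(1U << idx);
}

/* Expiry path: only bases with their bit set are touched. */
static void expire_timers(struct cpu_base *cb)
{
	int i;

	for (i = 0; i < MAX_BASES; i++) {
		if (!(cb->active_bases & (1U << i)))
			continue;	/* inactive base, skipped entirely */
		printf("expiring timers on base %d\n", i);
	}
}

int main(void)
{
	struct cpu_base cb = { 0 };

	enqueue_timer(&cb, BASE_MONOTONIC);
	expire_timers(&cb);		/* visits only BASE_MONOTONIC */
	remove_timer(&cb, BASE_MONOTONIC);
	expire_timers(&cb);		/* visits nothing */
	return 0;
}

The patch below applies the same bookkeeping to the real hrtimer code paths.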
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--	kernel/hrtimer.c	29
1 file changed, 18 insertions, 11 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 26dd32f9f6b2..1b08f6d67f12 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -64,17 +64,20 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 	.clock_base =
 	{
 		{
-			.index = CLOCK_REALTIME,
+			.index = HRTIMER_BASE_REALTIME,
+			.clockid = CLOCK_REALTIME,
 			.get_time = &ktime_get_real,
 			.resolution = KTIME_LOW_RES,
 		},
 		{
-			.index = CLOCK_MONOTONIC,
+			.index = HRTIMER_BASE_MONOTONIC,
+			.clockid = CLOCK_MONOTONIC,
 			.get_time = &ktime_get,
 			.resolution = KTIME_LOW_RES,
 		},
 		{
-			.index = CLOCK_BOOTTIME,
+			.index = HRTIMER_BASE_BOOTTIME,
+			.clockid = CLOCK_BOOTTIME,
 			.get_time = &ktime_get_boottime,
 			.resolution = KTIME_LOW_RES,
 		},
@@ -196,7 +199,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 	struct hrtimer_cpu_base *new_cpu_base;
 	int this_cpu = smp_processor_id();
 	int cpu = hrtimer_get_target(this_cpu, pinned);
-	int basenum = hrtimer_clockid_to_base(base->index);
+	int basenum = base->index;
 
 again:
 	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
@@ -857,6 +860,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
 	debug_activate(timer);
 
 	timerqueue_add(&base->active, &timer->node);
+	base->cpu_base->active_bases |= 1 << base->index;
 
 	/*
 	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
@@ -898,6 +902,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
 #endif
 	}
 	timerqueue_del(&base->active, &timer->node);
+	if (!timerqueue_getnext(&base->active))
+		base->cpu_base->active_bases &= ~(1 << base->index);
 out:
 	timer->state = newstate;
 }
@@ -1235,7 +1241,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now, entry_time, delta;
 	int i, retries = 0;
 
@@ -1257,12 +1262,15 @@ retry:
 	 */
 	cpu_base->expires_next.tv64 = KTIME_MAX;
 
-	base = cpu_base->clock_base;
-
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		ktime_t basenow;
+		struct hrtimer_clock_base *base;
 		struct timerqueue_node *node;
+		ktime_t basenow;
+
+		if (!(cpu_base->active_bases & (1 << i)))
+			continue;
 
+		base = cpu_base->clock_base + i;
 		basenow = ktime_add(now, base->offset);
 
 		while ((node = timerqueue_getnext(&base->active))) {
@@ -1295,7 +1303,6 @@ retry:
 
 			__run_hrtimer(timer, &basenow);
 		}
-		base++;
 	}
 
 	/*
@@ -1526,7 +1533,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
 	struct timespec __user *rmtp;
 	int ret = 0;
 
-	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
+	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
 				HRTIMER_MODE_ABS);
 	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
@@ -1578,7 +1585,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 
 	restart = &current_thread_info()->restart_block;
 	restart->fn = hrtimer_nanosleep_restart;
-	restart->nanosleep.index = t.timer.base->index;
+	restart->nanosleep.clockid = t.timer.base->clockid;
 	restart->nanosleep.rmtp = rmtp;
 	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
 