diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2009-07-03 09:44:31 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2009-07-29 17:30:46 -0400 |
commit | 305d0a1ab668ee560de7fe5eed0b62cf419a50f9 (patch) | |
tree | 19a6356f70b81200035cd62f3ca166ab8bf439f2 /kernel/hrtimer.c | |
parent | f6314be4567839f2850c48dcc7bedad4a5f798dc (diff) |
hrtimer: fixup hrtimer callback changes for preempt-rt
In preempt-rt we cannot call the callbacks which take sleeping locks
from the timer interrupt context.
Bring back the softirq split for now, until we have fixed the signal
delivery problem for real.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r-- | kernel/hrtimer.c | 141 |
1 files changed, 126 insertions, 15 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 2e318d9d73e0..aecb304768ed 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -535,15 +535,24 @@ static int hrtimer_reprogram(struct hrtimer *timer, | |||
535 | 535 | ||
536 | WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0); | 536 | WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0); |
537 | 537 | ||
538 | #ifndef CONFIG_PREEMPT_RT | ||
538 | /* | 539 | /* |
539 | * When the callback is running, we do not reprogram the clock event | 540 | * When the callback is running, we do not reprogram the clock event |
540 | * device. The timer callback is either running on a different CPU or | 541 | * device. The timer callback is either running on a different CPU or |
541 | * the callback is executed in the hrtimer_interrupt context. The | 542 | * the callback is executed in the hrtimer_interrupt context. The |
542 | * reprogramming is handled either by the softirq, which called the | 543 | * reprogramming is handled at the end of the hrtimer_interrupt. |
543 | * callback or at the end of the hrtimer_interrupt. | ||
544 | */ | 544 | */ |
545 | if (hrtimer_callback_running(timer)) | 545 | if (hrtimer_callback_running(timer)) |
546 | return 0; | 546 | return 0; |
547 | #else | ||
548 | /* | ||
549 | * preempt-rt changes the rules here as long as we have not | ||
550 | * solved the callback problem. For softirq based timers we | ||
551 | * need to allow reprogramming. | ||
552 | */ | ||
553 | if (hrtimer_callback_running(timer) && timer->irqsafe) | ||
554 | return 0; | ||
555 | #endif | ||
547 | 556 | ||
548 | /* | 557 | /* |
549 | * CLOCK_REALTIME timer might be requested with an absolute | 558 | * CLOCK_REALTIME timer might be requested with an absolute |
@@ -642,6 +651,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) | |||
642 | { | 651 | { |
643 | } | 652 | } |
644 | 653 | ||
654 | static void __run_hrtimer(struct hrtimer *timer); | ||
655 | static int hrtimer_rt_defer(struct hrtimer *timer); | ||
645 | 656 | ||
646 | /* | 657 | /* |
647 | * When High resolution timers are active, try to reprogram. Note, that in case | 658 | * When High resolution timers are active, try to reprogram. Note, that in case |
@@ -654,6 +665,17 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |||
654 | int wakeup) | 665 | int wakeup) |
655 | { | 666 | { |
656 | if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { | 667 | if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { |
668 | #ifdef CONFIG_PREEMPT_RT | ||
669 | /* | ||
670 | * Move softirq based timers away from the rbtree in | ||
671 | * case it expired already. Otherwise we would have a | ||
672 | * stale base->first entry until the softirq runs. | ||
673 | */ | ||
674 | if (!hrtimer_rt_defer(timer)) { | ||
675 | __run_hrtimer(timer); | ||
676 | return 1; | ||
677 | } | ||
678 | #endif | ||
657 | if (wakeup) { | 679 | if (wakeup) { |
658 | atomic_spin_unlock(&base->cpu_base->lock); | 680 | atomic_spin_unlock(&base->cpu_base->lock); |
659 | raise_softirq_irqoff(HRTIMER_SOFTIRQ); | 681 | raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
@@ -878,6 +900,11 @@ static void __remove_hrtimer(struct hrtimer *timer, | |||
878 | unsigned long newstate, int reprogram) | 900 | unsigned long newstate, int reprogram) |
879 | { | 901 | { |
880 | if (timer->state & HRTIMER_STATE_ENQUEUED) { | 902 | if (timer->state & HRTIMER_STATE_ENQUEUED) { |
903 | |||
904 | if (unlikely(!list_empty(&timer->cb_entry))) { | ||
905 | list_del_init(&timer->cb_entry); | ||
906 | goto out; | ||
907 | } | ||
881 | /* | 908 | /* |
882 | * Remove the timer from the rbtree and replace the | 909 | * Remove the timer from the rbtree and replace the |
883 | * first entry pointer if necessary. | 910 | * first entry pointer if necessary. |
@@ -890,6 +917,7 @@ static void __remove_hrtimer(struct hrtimer *timer, | |||
890 | } | 917 | } |
891 | rb_erase(&timer->node, &base->active); | 918 | rb_erase(&timer->node, &base->active); |
892 | } | 919 | } |
920 | out: | ||
893 | timer->state = newstate; | 921 | timer->state = newstate; |
894 | } | 922 | } |
895 | 923 | ||
@@ -1203,6 +1231,77 @@ static void __run_hrtimer(struct hrtimer *timer) | |||
1203 | timer->state &= ~HRTIMER_STATE_CALLBACK; | 1231 | timer->state &= ~HRTIMER_STATE_CALLBACK; |
1204 | } | 1232 | } |
1205 | 1233 | ||
1234 | #ifdef CONFIG_PREEMPT_RT | ||
1235 | |||
1236 | /* | ||
1237 | * The changes in mainline which removed the callback modes from | ||
1238 | * hrtimer are not yet working with -rt. The non wakeup_process() | ||
1239 | * based callbacks which involve sleeping locks need to be treated | ||
1240 | * separately. | ||
1241 | */ | ||
1242 | static void hrtimer_rt_run_pending(void) | ||
1243 | { | ||
1244 | enum hrtimer_restart (*fn)(struct hrtimer *); | ||
1245 | struct hrtimer_cpu_base *cpu_base; | ||
1246 | struct hrtimer_clock_base *base; | ||
1247 | struct hrtimer *timer; | ||
1248 | int index, restart; | ||
1249 | |||
1250 | local_irq_disable(); | ||
1251 | cpu_base = &per_cpu(hrtimer_bases, smp_processor_id()); | ||
1252 | |||
1253 | atomic_spin_lock(&cpu_base->lock); | ||
1254 | |||
1255 | for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { | ||
1256 | base = &cpu_base->clock_base[index]; | ||
1257 | |||
1258 | while (!list_empty(&base->expired)) { | ||
1259 | timer = list_first_entry(&base->expired, | ||
1260 | struct hrtimer, cb_entry); | ||
1261 | |||
1262 | /* | ||
1263 | * Same as the above __run_hrtimer function | ||
1264 | * just we run with interrupts enabled. | ||
1265 | */ | ||
1266 | debug_hrtimer_deactivate(timer); | ||
1267 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); | ||
1268 | timer_stats_account_hrtimer(timer); | ||
1269 | fn = timer->function; | ||
1270 | |||
1271 | atomic_spin_unlock_irq(&cpu_base->lock); | ||
1272 | restart = fn(timer); | ||
1273 | atomic_spin_lock_irq(&cpu_base->lock); | ||
1274 | |||
1275 | if (restart != HRTIMER_NORESTART) { | ||
1276 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); | ||
1277 | enqueue_hrtimer(timer, base); | ||
1278 | } | ||
1279 | timer->state &= ~HRTIMER_STATE_CALLBACK; | ||
1280 | } | ||
1281 | } | ||
1282 | |||
1283 | atomic_spin_unlock_irq(&cpu_base->lock); | ||
1284 | |||
1285 | wake_up_timer_waiters(cpu_base); | ||
1286 | } | ||
1287 | |||
1288 | static int hrtimer_rt_defer(struct hrtimer *timer) | ||
1289 | { | ||
1290 | if (timer->irqsafe) | ||
1291 | return 0; | ||
1292 | |||
1293 | __remove_hrtimer(timer, timer->base, timer->state, 0); | ||
1294 | list_add_tail(&timer->cb_entry, &timer->base->expired); | ||
1295 | return 1; | ||
1296 | } | ||
1297 | |||
1298 | #else | ||
1299 | |||
1300 | static inline void hrtimer_rt_run_pending(void) { } | ||
1301 | static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; } | ||
1302 | |||
1303 | #endif | ||
1304 | |||
1206 | #ifdef CONFIG_HIGH_RES_TIMERS | 1305 | #ifdef CONFIG_HIGH_RES_TIMERS |
1207 | 1306 | ||
1208 | static int force_clock_reprogram; | 1307 | static int force_clock_reprogram; |
@@ -1238,7 +1337,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
1238 | struct hrtimer_clock_base *base; | 1337 | struct hrtimer_clock_base *base; |
1239 | ktime_t expires_next, now; | 1338 | ktime_t expires_next, now; |
1240 | int nr_retries = 0; | 1339 | int nr_retries = 0; |
1241 | int i; | 1340 | int i, raise = 0; |
1242 | 1341 | ||
1243 | BUG_ON(!cpu_base->hres_active); | 1342 | BUG_ON(!cpu_base->hres_active); |
1244 | cpu_base->nr_events++; | 1343 | cpu_base->nr_events++; |
@@ -1299,7 +1398,10 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
1299 | break; | 1398 | break; |
1300 | } | 1399 | } |
1301 | 1400 | ||
1302 | __run_hrtimer(timer); | 1401 | if (!hrtimer_rt_defer(timer)) |
1402 | __run_hrtimer(timer); | ||
1403 | else | ||
1404 | raise = 1; | ||
1303 | } | 1405 | } |
1304 | base++; | 1406 | base++; |
1305 | } | 1407 | } |
@@ -1316,6 +1418,9 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
1316 | if (tick_program_event(expires_next, force_clock_reprogram)) | 1418 | if (tick_program_event(expires_next, force_clock_reprogram)) |
1317 | goto retry; | 1419 | goto retry; |
1318 | } | 1420 | } |
1421 | |||
1422 | if (raise) | ||
1423 | raise_softirq_irqoff(HRTIMER_SOFTIRQ); | ||
1319 | } | 1424 | } |
1320 | 1425 | ||
1321 | /* | 1426 | /* |
@@ -1354,17 +1459,18 @@ void hrtimer_peek_ahead_timers(void) | |||
1354 | local_irq_restore(flags); | 1459 | local_irq_restore(flags); |
1355 | } | 1460 | } |
1356 | 1461 | ||
1357 | static void run_hrtimer_softirq(struct softirq_action *h) | ||
1358 | { | ||
1359 | hrtimer_peek_ahead_timers(); | ||
1360 | } | ||
1361 | |||
1362 | #else /* CONFIG_HIGH_RES_TIMERS */ | 1462 | #else /* CONFIG_HIGH_RES_TIMERS */ |
1363 | 1463 | ||
1364 | static inline void __hrtimer_peek_ahead_timers(void) { } | 1464 | static inline void __hrtimer_peek_ahead_timers(void) { } |
1365 | 1465 | ||
1366 | #endif /* !CONFIG_HIGH_RES_TIMERS */ | 1466 | #endif /* !CONFIG_HIGH_RES_TIMERS */ |
1367 | 1467 | ||
1468 | static void run_hrtimer_softirq(struct softirq_action *h) | ||
1469 | { | ||
1470 | hrtimer_peek_ahead_timers(); | ||
1471 | hrtimer_rt_run_pending(); | ||
1472 | } | ||
1473 | |||
1368 | /* | 1474 | /* |
1369 | * Called from timer softirq every jiffy, expire hrtimers: | 1475 | * Called from timer softirq every jiffy, expire hrtimers: |
1370 | * | 1476 | * |
@@ -1399,7 +1505,7 @@ void hrtimer_run_queues(void) | |||
1399 | struct rb_node *node; | 1505 | struct rb_node *node; |
1400 | struct hrtimer_cpu_base *cpu_base; | 1506 | struct hrtimer_cpu_base *cpu_base; |
1401 | struct hrtimer_clock_base *base; | 1507 | struct hrtimer_clock_base *base; |
1402 | int index, gettime = 1; | 1508 | int index, gettime = 1, raise = 0; |
1403 | 1509 | ||
1404 | cpu_base = &per_cpu(hrtimer_bases, raw_smp_processor_id()); | 1510 | cpu_base = &per_cpu(hrtimer_bases, raw_smp_processor_id()); |
1405 | if (hrtimer_hres_active(cpu_base)) | 1511 | if (hrtimer_hres_active(cpu_base)) |
@@ -1426,12 +1532,16 @@ void hrtimer_run_queues(void) | |||
1426 | hrtimer_get_expires_tv64(timer)) | 1532 | hrtimer_get_expires_tv64(timer)) |
1427 | break; | 1533 | break; |
1428 | 1534 | ||
1429 | __run_hrtimer(timer); | 1535 | if (!hrtimer_rt_defer(timer)) |
1536 | __run_hrtimer(timer); | ||
1537 | else | ||
1538 | raise = 1; | ||
1430 | } | 1539 | } |
1431 | atomic_spin_unlock(&cpu_base->lock); | 1540 | atomic_spin_unlock(&cpu_base->lock); |
1432 | } | 1541 | } |
1433 | 1542 | ||
1434 | wake_up_timer_waiters(cpu_base); | 1543 | if (raise) |
1544 | raise_softirq_irqoff(HRTIMER_SOFTIRQ); | ||
1435 | } | 1545 | } |
1436 | 1546 | ||
1437 | /* | 1547 | /* |
@@ -1453,6 +1563,7 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) | |||
1453 | void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) | 1563 | void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) |
1454 | { | 1564 | { |
1455 | sl->timer.function = hrtimer_wakeup; | 1565 | sl->timer.function = hrtimer_wakeup; |
1566 | sl->timer.irqsafe = 1; | ||
1456 | sl->task = task; | 1567 | sl->task = task; |
1457 | } | 1568 | } |
1458 | 1569 | ||
@@ -1587,8 +1698,10 @@ static void __cpuinit init_hrtimers_cpu(int cpu) | |||
1587 | 1698 | ||
1588 | atomic_spin_lock_init(&cpu_base->lock); | 1699 | atomic_spin_lock_init(&cpu_base->lock); |
1589 | 1700 | ||
1590 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | 1701 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1591 | cpu_base->clock_base[i].cpu_base = cpu_base; | 1702 | cpu_base->clock_base[i].cpu_base = cpu_base; |
1703 | INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); | ||
1704 | } | ||
1592 | 1705 | ||
1593 | hrtimer_init_hres(cpu_base); | 1706 | hrtimer_init_hres(cpu_base); |
1594 | #ifdef CONFIG_PREEMPT_SOFTIRQS | 1707 | #ifdef CONFIG_PREEMPT_SOFTIRQS |
@@ -1706,9 +1819,7 @@ void __init hrtimers_init(void) | |||
1706 | hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, | 1819 | hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, |
1707 | (void *)(long)smp_processor_id()); | 1820 | (void *)(long)smp_processor_id()); |
1708 | register_cpu_notifier(&hrtimers_nb); | 1821 | register_cpu_notifier(&hrtimers_nb); |
1709 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
1710 | open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); | 1822 | open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); |
1711 | #endif | ||
1712 | } | 1823 | } |
1713 | 1824 | ||
1714 | /** | 1825 | /** |