author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-01-25 15:08:31 -0500
committer  Ingo Molnar <mingo@elte.hu>                2008-01-25 15:08:31 -0500
commit     2d44ae4d7135b9aee26439b3523b43473381bc5f
tree       ba3afc0f03142d26f9238974dab5b99bf1dca1db
parent     48d5e258216f1c7713633439beb98a38c7290649
hrtimer: clean up cpu->base locking tricks
In order to make it easier for the scheduler to use hrtimers, clean up the
locking a bit: instead of individual callbacks dropping and re-taking the
base lock themselves, hrtimer_interrupt() now drops cpu_base->lock around
callbacks marked HRTIMER_CB_IRQSAFE_NO_SOFTIRQ, avoiding lock inversion
against rq->lock and tasklist_lock.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   kernel/hrtimer.c          | 20
-rw-r--r--   kernel/time/tick-sched.c  |  8
2 files changed, 19 insertions(+), 9 deletions(-)
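For context, the sketch below (not part of the commit) shows the callback-dispatch
pattern the first hunk adds to hrtimer_interrupt(): the per-CPU base lock is dropped
only around callbacks flagged HRTIMER_CB_IRQSAFE_NO_SOFTIRQ, so such a callback may
take rq->lock or tasklist_lock without inverting the lock order. It is a self-contained
userspace illustration; pthread_mutex_t stands in for cpu_base->lock and the struct/enum
names are invented for the example.

/*
 * Illustrative userspace sketch only -- pthread_mutex_t stands in for
 * cpu_base->lock, and the struct/enum names are invented for the example.
 */
#include <pthread.h>

enum cb_restart { CB_NORESTART, CB_RESTART };

struct cb_timer {
	int irqsafe_no_softirq;	/* plays the role of timer->cb_mode */
	enum cb_restart (*function)(struct cb_timer *);
};

static enum cb_restart run_expired(struct cb_timer *timer,
				   pthread_mutex_t *base_lock)
{
	enum cb_restart (*fn)(struct cb_timer *) = timer->function;
	enum cb_restart restart;

	if (timer->irqsafe_no_softirq) {
		/*
		 * Scheduler-style timers may take other locks inside the
		 * callback (rq->lock, tasklist_lock in the real code), so
		 * drop the base lock around the call to avoid inversion.
		 */
		pthread_mutex_unlock(base_lock);
		restart = fn(timer);
		pthread_mutex_lock(base_lock);
	} else {
		restart = fn(timer);
	}

	/* caller re-enqueues the timer when restart != CB_NORESTART */
	return restart;
}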
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f994bb8065e6..9f850ca032b6 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1063,7 +1063,9 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 		basenow = ktime_add(now, base->offset);
 
 		while ((node = base->first)) {
+			enum hrtimer_restart (*fn)(struct hrtimer *);
 			struct hrtimer *timer;
+			int restart;
 
 			timer = rb_entry(node, struct hrtimer, node);
 
@@ -1091,13 +1093,29 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 					     HRTIMER_STATE_CALLBACK, 0);
 			timer_stats_account_hrtimer(timer);
 
+			fn = timer->function;
+			if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+				/*
+				 * Used for scheduler timers, avoid lock
+				 * inversion with rq->lock and tasklist_lock.
+				 *
+				 * These timers are required to deal with
+				 * enqueue expiry themselves and are not
+				 * allowed to migrate.
+				 */
+				spin_unlock(&cpu_base->lock);
+				restart = fn(timer);
+				spin_lock(&cpu_base->lock);
+			} else
+				restart = fn(timer);
+
 			/*
 			 * Note: We clear the CALLBACK bit after
 			 * enqueue_hrtimer to avoid reprogramming of
 			 * the event hardware. This happens at the end
 			 * of this function anyway.
 			 */
-			if (timer->function(timer) != HRTIMER_NORESTART) {
+			if (restart != HRTIMER_NORESTART) {
 				BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
 				enqueue_hrtimer(timer, base, 0);
 			}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5f9fb645b725..1a21b6fdb674 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -514,7 +514,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 {
 	struct tick_sched *ts =
 		container_of(timer, struct tick_sched, sched_timer);
-	struct hrtimer_cpu_base *base = timer->base->cpu_base;
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 	int cpu = smp_processor_id();
@@ -552,15 +551,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 			touch_softlockup_watchdog();
 			ts->idle_jiffies++;
 		}
-		/*
-		 * update_process_times() might take tasklist_lock, hence
-		 * drop the base lock. sched-tick hrtimers are per-CPU and
-		 * never accessible by userspace APIs, so this is safe to do.
-		 */
-		spin_unlock(&base->lock);
 		update_process_times(user_mode(regs));
 		profile_tick(CPU_PROFILING);
-		spin_lock(&base->lock);
 	}
 
 	/* Do not restart, when we are in the idle loop */
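The unlock/relock around update_process_times() can go away because
hrtimer_interrupt() (first hunk above) now drops cpu_base->lock itself before
invoking callbacks flagged HRTIMER_CB_IRQSAFE_NO_SOFTIRQ, and the sched-tick
timer is registered with that flag. Roughly, paraphrased from memory of
tick_setup_sched_timer() in kernels of this era (not part of this commit;
exact field names may differ):

	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;
	/* run straight from the timer interrupt, no softirq deferral */
	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;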