Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--   kernel/hrtimer.c   | 26
1 files changed, 16 insertions, 10 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2b465dfde426..47e63349d1b2 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -664,14 +664,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 
 		/* Timer is expired, act upon the callback mode */
 		switch(timer->cb_mode) {
-		case HRTIMER_CB_IRQSAFE_NO_RESTART:
-			debug_hrtimer_deactivate(timer);
-			/*
-			 * We can call the callback from here. No restart
-			 * happens, so no danger of recursion
-			 */
-			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
-			return 1;
 		case HRTIMER_CB_IRQSAFE_PERCPU:
 		case HRTIMER_CB_IRQSAFE_UNLOCKED:
 			/*
@@ -683,7 +675,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 			 */
 			debug_hrtimer_deactivate(timer);
 			return 1;
-		case HRTIMER_CB_IRQSAFE:
 		case HRTIMER_CB_SOFTIRQ:
 			/*
 			 * Move everything else into the softirq pending list !
@@ -1209,6 +1200,7 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
 		enum hrtimer_restart (*fn)(struct hrtimer *);
 		struct hrtimer *timer;
 		int restart;
+		int emulate_hardirq_ctx = 0;
 
 		timer = list_entry(cpu_base->cb_pending.next,
 				   struct hrtimer, cb_entry);
@@ -1217,10 +1209,24 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
 		timer_stats_account_hrtimer(timer);
 
 		fn = timer->function;
+		/*
+		 * A timer might have been added to the cb_pending list
+		 * when it was migrated during a cpu-offline operation.
+		 * Emulate hardirq context for such timers.
+		 */
+		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+		    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
+			emulate_hardirq_ctx = 1;
+
 		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
 		spin_unlock_irq(&cpu_base->lock);
 
-		restart = fn(timer);
+		if (unlikely(emulate_hardirq_ctx)) {
+			local_irq_disable();
+			restart = fn(timer);
+			local_irq_enable();
+		} else
+			restart = fn(timer);
 
 		spin_lock_irq(&cpu_base->lock);
 
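
The new behaviour of run_hrtimer_pending() shown in the last hunk reduces to one rule: if a timer landed on the cb_pending list because it was migrated during a cpu-offline operation (cb_mode HRTIMER_CB_IRQSAFE_PERCPU or HRTIMER_CB_IRQSAFE_UNLOCKED), its callback is invoked with local interrupts disabled to emulate the hardirq context it normally expects; all other pending callbacks run exactly as before. The sketch below models only that decision in plain userspace C; the enum values, the struct layout, noop_callback() and the fake_irq_*() helpers are simplified stand-ins for illustration, not the kernel's definitions.

/*
 * Minimal userspace sketch of the dispatch rule added to
 * run_hrtimer_pending(). All names below are simplified stand-ins.
 */
#include <stdio.h>

enum hrtimer_cb_mode {
	HRTIMER_CB_SOFTIRQ,
	HRTIMER_CB_IRQSAFE_PERCPU,
	HRTIMER_CB_IRQSAFE_UNLOCKED,
};

enum hrtimer_restart { HRTIMER_NORESTART, HRTIMER_RESTART };

struct hrtimer {
	enum hrtimer_cb_mode cb_mode;
	enum hrtimer_restart (*function)(struct hrtimer *timer);
};

/* Stand-ins for local_irq_disable()/local_irq_enable(): they only log. */
static void fake_irq_disable(void) { printf("  irqs off\n"); }
static void fake_irq_enable(void)  { printf("  irqs on\n"); }

static enum hrtimer_restart noop_callback(struct hrtimer *timer)
{
	(void)timer;
	printf("  callback runs\n");
	return HRTIMER_NORESTART;
}

/* Mirrors the new branch: migrated per-CPU timers get hardirq emulation. */
static enum hrtimer_restart run_pending_callback(struct hrtimer *timer)
{
	enum hrtimer_restart restart;
	int emulate_hardirq_ctx =
		timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
		timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED;

	if (emulate_hardirq_ctx) {
		fake_irq_disable();
		restart = timer->function(timer);
		fake_irq_enable();
	} else {
		restart = timer->function(timer);
	}
	return restart;
}

int main(void)
{
	struct hrtimer migrated = { HRTIMER_CB_IRQSAFE_PERCPU, noop_callback };
	struct hrtimer softirq  = { HRTIMER_CB_SOFTIRQ, noop_callback };

	printf("migrated per-cpu timer:\n");
	run_pending_callback(&migrated);
	printf("ordinary softirq timer:\n");
	run_pending_callback(&softirq);
	return 0;
}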