author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-31 16:35:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-31 16:35:31 -0400
commit     f22e08a79f3765fecf060b225a46931c94fb0a92
tree       2f02777e8893d03289ec0a7f5f414f3ff01c8c37 /kernel
parent     f187e9fd68577cdd5f914659b6f7f11124e40485
parent     e3831edd59edf57ca11fc289f08961b20baf5146
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar.
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Fix incorrect usage of for_each_cpu_mask() in select_fallback_rq()
sched: Fix __schedule_bug() output when called from an interrupt
sched/arch: Introduce the finish_arch_post_lock_switch() scheduler callback
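For context on the first fix: the legacy for_each_cpu_mask() iterator took a struct cpumask itself, which is why the old code dereferenced the pointer (*nodemask); for_each_cpu() takes the pointer directly. A minimal, self-contained sketch of the corrected iteration pattern follows; the helper name pick_online_cpu and its use of cpu_active() are illustrative and not part of this merge.

/*
 * Sketch only (not from this merge): for_each_cpu() walks the set bits of a
 * cpumask passed by pointer, so no '*mask' dereference is needed.
 */
#include <linux/cpumask.h>
#include <linux/errno.h>

static int pick_online_cpu(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu_online(cpu) && cpu_active(cpu))
			return cpu;
	}
	return -ENODEV;	/* no usable CPU in this mask */
}

The diff below applies exactly this change inside select_fallback_rq(), once for the per-node mask and once for tsk_cpus_allowed(p).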
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c   13
-rw-r--r--  kernel/sched/sched.h   3
2 files changed, 7 insertions, 9 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e3ed0ecee7c7..4603b9d8f30a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1270,7 +1270,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	int dest_cpu;
 
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_mask(dest_cpu, *nodemask) {
+	for_each_cpu(dest_cpu, nodemask) {
 		if (!cpu_online(dest_cpu))
 			continue;
 		if (!cpu_active(dest_cpu))
@@ -1281,7 +1281,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 
 	for (;;) {
 		/* Any allowed, online CPU? */
-		for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
 			if (!cpu_online(dest_cpu))
 				continue;
 			if (!cpu_active(dest_cpu))
@@ -1964,6 +1964,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
+	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
@@ -3101,8 +3102,6 @@ EXPORT_SYMBOL(sub_preempt_count);
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-	struct pt_regs *regs = get_irq_regs();
-
 	if (oops_in_progress)
 		return;
 
@@ -3113,11 +3112,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
 	print_modules();
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
-
-	if (regs)
-		show_regs(regs);
-	else
-		dump_stack();
+	dump_stack();
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 42b1f304b044..fb3acba4d52e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -681,6 +681,9 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #ifndef finish_arch_switch
 # define finish_arch_switch(prev)	do { } while (0)
 #endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch()	do { } while (0)
+#endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
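The callback introduced above is a no-op by default; an architecture opts in by providing its own definition in a header the scheduler core already sees, mirroring the existing finish_arch_switch() convention. A hedged sketch of such an override follows; the body is illustrative and not taken from any in-tree port.

/*
 * Illustrative arch-side override (not part of this merge). It runs after
 * finish_lock_switch() has released the runqueue lock, so work that cannot
 * be done under rq->lock, e.g. completing a deferred MMU context switch
 * for the incoming task, can be finished here.
 */
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	/* arch-specific post-switch work for the incoming task */
}

Because the generic definition is guarded by #ifndef, an architecture that does not provide the macro keeps the empty do { } while (0) stub and pays no cost in finish_task_switch().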