author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-21 13:12:00 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-21 13:12:00 -0500
commit    1814f2da5ebd7a516805e0a62047cb45eee10bdc (patch)
tree      ac822f64051b2a0a519241a98a20706db1739d12
parent    9d5eb6787aeb1dd0a0c71c5c41ca1b4cac3e6be4 (diff)
parent    70f1120527797adb31c68bdc6f1b45e182c342c7 (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Fix hotplug hang
  sched: Restore printk sanity
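The "Restore printk sanity" side of this merge backs out an earlier pr_err()/pr_cont() conversion in kernel/sched.c, including the pr_fmt() definition removed in the first hunk below. With pr_fmt() defined at the top of the file, every pr_*() call is prefixed with KBUILD_MODNAME ("sched: "), a prefix the scheduler's diagnostic output does not want. A minimal out-of-tree sketch of that prefixing behavior, for illustration only (the "demo" module and demo_init() are hypothetical, not part of this patch):

/* Hypothetical module showing how pr_fmt() affects pr_*() output. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	pr_info("hello\n");		/* logs "demo: hello" -- pr_fmt() prefix applied */
	printk(KERN_INFO "hello\n");	/* logs "hello" -- printk() ignores pr_fmt() */
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");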
-rw-r--r--  kernel/sched.c           91
-rw-r--r--  kernel/sched_idletask.c   2
2 files changed, 51 insertions(+), 42 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 720df108a2d6..87f1f47beffe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -26,8 +26,6 @@
  * Thomas Gleixner, Mike Kravetz
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -2348,7 +2346,7 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 	 * not worry about this generic constraint ]
 	 */
 	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-		     !cpu_active(cpu)))
+		     !cpu_online(cpu)))
 		cpu = select_fallback_rq(task_cpu(p), p);
 
 	return cpu;
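The cpu_active() -> cpu_online() relaxation above is the "Fix hotplug hang" half of this merge: during hot-unplug a CPU is cleared from the active mask before it actually goes offline, and a wakeup issued in that window must still be allowed to target the dying CPU rather than being forced through select_fallback_rq(). A sketch of the check in isolation, under the assumption that the surrounding select_task_rq() logic is otherwise unchanged (validate_wake_cpu() is a hypothetical helper, not kernel code):

/* Hypothetical helper isolating the validation select_task_rq() performs.
 * cpu_online() is deliberately weaker than cpu_active(): a CPU being
 * hot-unplugged is !active but still online, and wakeups raced against
 * hotplug must still be able to land on it. */
static int validate_wake_cpu(struct task_struct *p, int cpu)
{
	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
		     !cpu_online(cpu)))
		return select_fallback_rq(task_cpu(p), p);
	return cpu;
}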
@@ -5375,8 +5373,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 {
 	struct pt_regs *regs = get_irq_regs();
 
-	pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n",
-	       prev->comm, prev->pid, preempt_count());
+	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+		prev->comm, prev->pid, preempt_count());
 
 	debug_show_held_locks(prev);
 	print_modules();
@@ -6940,23 +6938,23 @@ void sched_show_task(struct task_struct *p)
 	unsigned state;
 
 	state = p->state ? __ffs(p->state) + 1 : 0;
-	pr_info("%-13.13s %c", p->comm,
+	printk(KERN_INFO "%-13.13s %c", p->comm,
 		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
 	if (state == TASK_RUNNING)
-		pr_cont(" running  ");
+		printk(KERN_CONT " running  ");
 	else
-		pr_cont(" %08lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
 #else
 	if (state == TASK_RUNNING)
-		pr_cont("  running task    ");
+		printk(KERN_CONT "  running task    ");
 	else
-		pr_cont(" %016lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	free = stack_not_used(p);
 #endif
-	pr_cont("%5lu %5d %6d 0x%08lx\n", free,
+	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
 		task_pid_nr(p), task_pid_nr(p->real_parent),
 		(unsigned long)task_thread_info(p)->flags);
 
@@ -6968,9 +6966,11 @@ void show_state_filter(unsigned long state_filter)
 	struct task_struct *g, *p;
 
 #if BITS_PER_LONG == 32
-	pr_info("  task                PC stack   pid father\n");
+	printk(KERN_INFO
+		"  task                PC stack   pid father\n");
 #else
-	pr_info("  task                        PC stack   pid father\n");
+	printk(KERN_INFO
+		"  task                        PC stack   pid father\n");
 #endif
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
@@ -7828,44 +7828,48 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
 
 	if (!(sd->flags & SD_LOAD_BALANCE)) {
-		pr_cont("does not load-balance\n");
+		printk("does not load-balance\n");
 		if (sd->parent)
-			pr_err("ERROR: !SD_LOAD_BALANCE domain has parent\n");
+			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+					" has parent");
 		return -1;
 	}
 
-	pr_cont("span %s level %s\n", str, sd->name);
+	printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-		pr_err("ERROR: domain->span does not contain CPU%d\n", cpu);
+		printk(KERN_ERR "ERROR: domain->span does not contain "
+				"CPU%d\n", cpu);
 	}
 	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
-		pr_err("ERROR: domain->groups does not contain CPU%d\n", cpu);
+		printk(KERN_ERR "ERROR: domain->groups does not contain"
+				" CPU%d\n", cpu);
 	}
 
 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
 	do {
 		if (!group) {
-			pr_cont("\n");
-			pr_err("ERROR: group is NULL\n");
+			printk("\n");
+			printk(KERN_ERR "ERROR: group is NULL\n");
 			break;
 		}
 
 		if (!group->cpu_power) {
-			pr_cont("\n");
-			pr_err("ERROR: domain->cpu_power not set\n");
+			printk(KERN_CONT "\n");
+			printk(KERN_ERR "ERROR: domain->cpu_power not "
+					"set\n");
 			break;
 		}
 
 		if (!cpumask_weight(sched_group_cpus(group))) {
-			pr_cont("\n");
-			pr_err("ERROR: empty group\n");
+			printk(KERN_CONT "\n");
+			printk(KERN_ERR "ERROR: empty group\n");
 			break;
 		}
 
 		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
-			pr_cont("\n");
-			pr_err("ERROR: repeated CPUs\n");
+			printk(KERN_CONT "\n");
+			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}
 
@@ -7873,21 +7877,23 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
-		pr_cont(" %s", str);
+		printk(KERN_CONT " %s", str);
 		if (group->cpu_power != SCHED_LOAD_SCALE) {
-			pr_cont(" (cpu_power = %d)", group->cpu_power);
+			printk(KERN_CONT " (cpu_power = %d)",
+				group->cpu_power);
 		}
 
 		group = group->next;
 	} while (group != sd->groups);
-	pr_cont("\n");
+	printk(KERN_CONT "\n");
 
 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
-		pr_err("ERROR: groups don't span domain->span\n");
+		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
 
 	if (sd->parent &&
 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
-		pr_err("ERROR: parent span is not a superset of domain->span\n");
+		printk(KERN_ERR "ERROR: parent span is not a superset "
+			"of domain->span\n");
 	return 0;
 }
 
@@ -8443,7 +8449,8 @@ static int build_numa_sched_groups(struct s_data *d,
 	sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
 			  GFP_KERNEL, num);
 	if (!sg) {
-		pr_warning("Can not alloc domain group for node %d\n", num);
+		printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+		       num);
 		return -ENOMEM;
 	}
 	d->sched_group_nodes[num] = sg;
@@ -8472,8 +8479,8 @@ static int build_numa_sched_groups(struct s_data *d,
 		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
 				  GFP_KERNEL, num);
 		if (!sg) {
-			pr_warning("Can not alloc domain group for node %d\n",
-				   j);
+			printk(KERN_WARNING
+			       "Can not alloc domain group for node %d\n", j);
 			return -ENOMEM;
 		}
 		sg->cpu_power = 0;
@@ -8701,7 +8708,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 	d->sched_group_nodes = kcalloc(nr_node_ids,
 				       sizeof(struct sched_group *), GFP_KERNEL);
 	if (!d->sched_group_nodes) {
-		pr_warning("Can not alloc sched group node list\n");
+		printk(KERN_WARNING "Can not alloc sched group node list\n");
 		return sa_notcovered;
 	}
 	sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
@@ -8718,7 +8725,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 		return sa_send_covered;
 	d->rd = alloc_rootdomain();
 	if (!d->rd) {
-		pr_warning("Cannot alloc root domain\n");
+		printk(KERN_WARNING "Cannot alloc root domain\n");
 		return sa_tmpmask;
 	}
 	return sa_rootdomain;
@@ -9700,11 +9707,13 @@ void __might_sleep(char *file, int line, int preempt_offset)
 		return;
 	prev_jiffy = jiffies;
 
-	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
-		file, line);
-	pr_err("in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-		in_atomic(), irqs_disabled(),
-		current->pid, current->comm);
+	printk(KERN_ERR
+		"BUG: sleeping function called from invalid context at %s:%d\n",
+			file, line);
+	printk(KERN_ERR
+		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+			in_atomic(), irqs_disabled(),
+			current->pid, current->comm);
 
 	debug_show_held_locks(current);
 	if (irqs_disabled())
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 21b969a28725..5f93b570d383 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -35,7 +35,7 @@ static void
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
 {
 	raw_spin_unlock_irq(&rq->lock);
-	pr_err("bad: scheduling from the idle thread!\n");
+	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
 	dump_stack();
 	raw_spin_lock_irq(&rq->lock);
 }