-rw-r--r--  kernel/sched.c          | 94
-rw-r--r--  kernel/sched_idletask.c |  2
2 files changed, 43 insertions, 53 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 8b3532f262d7..258c73c6a2f3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -26,6 +26,8 @@
  *  Thomas Gleixner, Mike Kravetz
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -5337,8 +5339,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 {
 	struct pt_regs *regs = get_irq_regs();
 
-	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
-		prev->comm, prev->pid, preempt_count());
+	pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n",
+	       prev->comm, prev->pid, preempt_count());
 
 	debug_show_held_locks(prev);
 	print_modules();
@@ -6906,23 +6908,23 @@ void sched_show_task(struct task_struct *p)
 	unsigned state;
 
 	state = p->state ? __ffs(p->state) + 1 : 0;
-	printk(KERN_INFO "%-13.13s %c", p->comm,
+	pr_info("%-13.13s %c", p->comm,
 		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
 	if (state == TASK_RUNNING)
-		printk(KERN_CONT " running ");
+		pr_cont(" running ");
 	else
-		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
+		pr_cont(" %08lx ", thread_saved_pc(p));
 #else
 	if (state == TASK_RUNNING)
-		printk(KERN_CONT " running task ");
+		pr_cont(" running task ");
 	else
-		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
+		pr_cont(" %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	free = stack_not_used(p);
 #endif
-	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
+	pr_cont("%5lu %5d %6d 0x%08lx\n", free,
 		task_pid_nr(p), task_pid_nr(p->real_parent),
 		(unsigned long)task_thread_info(p)->flags);
 
@@ -6934,11 +6936,9 @@ void show_state_filter(unsigned long state_filter)
 	struct task_struct *g, *p;
 
 #if BITS_PER_LONG == 32
-	printk(KERN_INFO
-		"  task                PC stack   pid father\n");
+	pr_info("  task                PC stack   pid father\n");
 #else
-	printk(KERN_INFO
-		"  task                        PC stack   pid father\n");
+	pr_info("  task                        PC stack   pid father\n");
 #endif
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
@@ -7296,9 +7296,8 @@ again:
 	 * leave kernel.
 	 */
 	if (p->mm && printk_ratelimit()) {
-		printk(KERN_INFO "process %d (%s) no "
-			"longer affine to cpu%d\n",
-			task_pid_nr(p), p->comm, dead_cpu);
+		pr_info("process %d (%s) no longer affine to cpu%d\n",
+			task_pid_nr(p), p->comm, dead_cpu);
 	}
 }
 
@@ -7805,48 +7804,44 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
 
 	if (!(sd->flags & SD_LOAD_BALANCE)) {
-		printk("does not load-balance\n");
+		pr_cont("does not load-balance\n");
 		if (sd->parent)
-			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
-				" has parent");
+			pr_err("ERROR: !SD_LOAD_BALANCE domain has parent\n");
 		return -1;
 	}
 
-	printk(KERN_CONT "span %s level %s\n", str, sd->name);
+	pr_cont("span %s level %s\n", str, sd->name);
 
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-		printk(KERN_ERR "ERROR: domain->span does not contain "
-			"CPU%d\n", cpu);
+		pr_err("ERROR: domain->span does not contain CPU%d\n", cpu);
 	}
 	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
-		printk(KERN_ERR "ERROR: domain->groups does not contain"
-			" CPU%d\n", cpu);
+		pr_err("ERROR: domain->groups does not contain CPU%d\n", cpu);
 	}
 
 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
 	do {
 		if (!group) {
-			printk("\n");
-			printk(KERN_ERR "ERROR: group is NULL\n");
+			pr_cont("\n");
+			pr_err("ERROR: group is NULL\n");
 			break;
 		}
 
 		if (!group->cpu_power) {
-			printk(KERN_CONT "\n");
-			printk(KERN_ERR "ERROR: domain->cpu_power not "
-				"set\n");
+			pr_cont("\n");
+			pr_err("ERROR: domain->cpu_power not set\n");
 			break;
 		}
 
 		if (!cpumask_weight(sched_group_cpus(group))) {
-			printk(KERN_CONT "\n");
-			printk(KERN_ERR "ERROR: empty group\n");
+			pr_cont("\n");
+			pr_err("ERROR: empty group\n");
 			break;
 		}
 
 		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
-			printk(KERN_CONT "\n");
-			printk(KERN_ERR "ERROR: repeated CPUs\n");
+			pr_cont("\n");
+			pr_err("ERROR: repeated CPUs\n");
 			break;
 		}
 
@@ -7854,23 +7849,21 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
-		printk(KERN_CONT " %s", str);
+		pr_cont(" %s", str);
 		if (group->cpu_power != SCHED_LOAD_SCALE) {
-			printk(KERN_CONT " (cpu_power = %d)",
-				group->cpu_power);
+			pr_cont(" (cpu_power = %d)", group->cpu_power);
 		}
 
 		group = group->next;
 	} while (group != sd->groups);
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 
 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
-		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+		pr_err("ERROR: groups don't span domain->span\n");
 
 	if (sd->parent &&
 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
-		printk(KERN_ERR "ERROR: parent span is not a superset "
-			"of domain->span\n");
+		pr_err("ERROR: parent span is not a superset of domain->span\n");
 	return 0;
 }
 
@@ -8426,8 +8419,7 @@ static int build_numa_sched_groups(struct s_data *d,
 	sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
 			  GFP_KERNEL, num);
 	if (!sg) {
-		printk(KERN_WARNING "Can not alloc domain group for node %d\n",
-			num);
+		pr_warning("Can not alloc domain group for node %d\n", num);
 		return -ENOMEM;
 	}
 	d->sched_group_nodes[num] = sg;
@@ -8456,8 +8448,8 @@ static int build_numa_sched_groups(struct s_data *d,
 		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
 				  GFP_KERNEL, num);
 		if (!sg) {
-			printk(KERN_WARNING
-				"Can not alloc domain group for node %d\n", j);
+			pr_warning("Can not alloc domain group for node %d\n",
+				   j);
 			return -ENOMEM;
 		}
 		sg->cpu_power = 0;
@@ -8685,7 +8677,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 	d->sched_group_nodes = kcalloc(nr_node_ids,
 				       sizeof(struct sched_group *), GFP_KERNEL);
 	if (!d->sched_group_nodes) {
-		printk(KERN_WARNING "Can not alloc sched group node list\n");
+		pr_warning("Can not alloc sched group node list\n");
 		return sa_notcovered;
 	}
 	sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
@@ -8702,7 +8694,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 		return sa_send_covered;
 	d->rd = alloc_rootdomain();
 	if (!d->rd) {
-		printk(KERN_WARNING "Cannot alloc root domain\n");
+		pr_warning("Cannot alloc root domain\n");
 		return sa_tmpmask;
 	}
 	return sa_rootdomain;
@@ -9684,13 +9676,11 @@ void __might_sleep(char *file, int line, int preempt_offset)
 		return;
 	prev_jiffy = jiffies;
 
-	printk(KERN_ERR
-		"BUG: sleeping function called from invalid context at %s:%d\n",
-			file, line);
-	printk(KERN_ERR
-		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-			in_atomic(), irqs_disabled(),
-			current->pid, current->comm);
+	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
+	       file, line);
+	pr_err("in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+	       in_atomic(), irqs_disabled(),
+	       current->pid, current->comm);
 
 	debug_show_held_locks(current);
 	if (irqs_disabled())
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 33d5384a73a8..b810e22772d5 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -35,7 +35,7 @@ static void
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
 {
 	spin_unlock_irq(&rq->lock);
-	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
+	pr_err("bad: scheduling from the idle thread!\n");
 	dump_stack();
 	spin_lock_irq(&rq->lock);
 }
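
For reference, a minimal userspace sketch (not part of the patch) of the pr_fmt()/pr_<level>() pattern the diff adopts: defining pr_fmt() before the first include gives every later pr_err()/pr_info() call a common "sched: " prefix, because the kernel's pr_<level>() helpers expand to printk(KERN_<LEVEL> pr_fmt(fmt), ...). The fprintf()/printf() stand-ins and the hard-coded KBUILD_MODNAME below are illustrative assumptions, not kernel code.

/*
 * Standalone sketch of the pr_fmt()/pr_<level>() pattern.
 * KBUILD_MODNAME is normally supplied by kbuild; it is stubbed out here,
 * and fprintf()/printf() stand in for printk().
 */
#include <stdio.h>

#define KBUILD_MODNAME "sched"               /* kbuild provides this in-tree */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt  /* same define the patch adds   */

/*
 * In-tree, pr_err()/pr_info() expand to printk(KERN_ERR/KERN_INFO
 * pr_fmt(fmt), ...); pr_cont() deliberately skips pr_fmt() so that
 * continuation output stays unprefixed.
 */
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_cont(fmt, ...) printf(fmt, ##__VA_ARGS__)

int main(void)
{
	/* Prints: "sched: process 42 (swapper) no longer affine to cpu1" */
	pr_info("process %d (%s) no longer affine to cpu%d\n", 42, "swapper", 1);

	/* Prints: "sched: BUG: scheduling while atomic: foo/1234/0x00000001" */
	pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n", "foo", 1234, 1);
	return 0;
}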