-rw-r--r--   kernel/sched.c            136
-rw-r--r--   kernel/sched_clock.c       23
-rw-r--r--   kernel/sched_idletask.c     2
3 files changed, 78 insertions, 83 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 18cceeecce35..9c30858b6463 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -26,6 +26,8 @@
  * Thomas Gleixner, Mike Kravetz
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
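
(Note: with the pr_fmt() definition added above, every pr_<level>() call in this file gets a KBUILD_MODNAME ("sched: ") prefix. For reference, the helpers used throughout this patch expand roughly as follows; this is a simplified sketch of include/linux/kernel.h from this era, not part of the diff itself:)

	#ifndef pr_fmt
	#define pr_fmt(fmt) fmt				/* default: no prefix */
	#endif

	#define pr_err(fmt, ...)     printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_warning(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_info(fmt, ...)    printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
	/* pr_cont() continues the previous line, so it deliberately skips pr_fmt() */
	#define pr_cont(fmt, ...)    printk(KERN_CONT fmt, ##__VA_ARGS__)
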
@@ -5340,8 +5342,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 {
 	struct pt_regs *regs = get_irq_regs();
 
-	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+	pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n",
 		prev->comm, prev->pid, preempt_count());
 
 	debug_show_held_locks(prev);
 	print_modules();
@@ -5911,14 +5913,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  */
 bool try_wait_for_completion(struct completion *x)
 {
+	unsigned long flags;
 	int ret = 1;
 
-	spin_lock_irq(&x->wait.lock);
+	spin_lock_irqsave(&x->wait.lock, flags);
 	if (!x->done)
 		ret = 0;
 	else
 		x->done--;
-	spin_unlock_irq(&x->wait.lock);
+	spin_unlock_irqrestore(&x->wait.lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(try_wait_for_completion);
@@ -5933,12 +5936,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
+	unsigned long flags;
 	int ret = 1;
 
-	spin_lock_irq(&x->wait.lock);
+	spin_lock_irqsave(&x->wait.lock, flags);
 	if (!x->done)
 		ret = 0;
-	spin_unlock_irq(&x->wait.lock);
+	spin_unlock_irqrestore(&x->wait.lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(completion_done);
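
(The two completion hunks above switch from spin_lock_irq() to spin_lock_irqsave() so that try_wait_for_completion() and completion_done() can also be called with interrupts already disabled; the plain _irq variant would unconditionally re-enable interrupts on unlock. Minimal sketch of the pattern, using only what the hunks show:)

	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);	/* save caller's IRQ state, then disable */
	/* ... examine or update x->done ... */
	spin_unlock_irqrestore(&x->wait.lock, flags);	/* restore whatever state the caller had */
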
@@ -6457,7 +6461,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 		return -EINVAL;
 
 	retval = -ESRCH;
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	p = find_process_by_pid(pid);
 	if (p) {
 		retval = security_task_getscheduler(p);
@@ -6465,7 +6469,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 			retval = p->policy
 				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
 	}
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return retval;
 }
 
@@ -6483,7 +6487,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 	if (!param || pid < 0)
 		return -EINVAL;
 
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	p = find_process_by_pid(pid);
 	retval = -ESRCH;
 	if (!p)
@@ -6494,7 +6498,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 		goto out_unlock;
 
 	lp.sched_priority = p->rt_priority;
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	/*
 	 * This one might sleep, we cannot do it with a spinlock held ...
@@ -6504,7 +6508,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 	return retval;
 
 out_unlock:
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return retval;
 }
 
@@ -6515,22 +6519,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	p = find_process_by_pid(pid);
 	if (!p) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		put_online_cpus();
 		return -ESRCH;
 	}
 
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held. We will bump the task_struct's
-	 * usage count and then drop tasklist_lock.
-	 */
+	/* Prevent p going away */
 	get_task_struct(p);
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
 		retval = -ENOMEM;
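
(These tasklist_lock to RCU conversions rely on the pid-to-task lookup being RCU-protected, so find_process_by_pid() only needs rcu_read_lock(). Where the code may sleep afterwards, as in sched_setaffinity() above, the task is pinned with get_task_struct() before the read-side section ends. A sketch of that pattern; put_task_struct() is the usual counterpart and does not appear in this hunk:)

	struct task_struct *p;

	rcu_read_lock();
	p = find_process_by_pid(pid);		/* lookup is only valid inside the RCU section */
	if (p)
		get_task_struct(p);		/* take a reference before leaving it */
	rcu_read_unlock();

	if (!p)
		return -ESRCH;

	/* ... work that may sleep ... */

	put_task_struct(p);
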
@@ -6616,7 +6616,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	retval = -ESRCH;
 	p = find_process_by_pid(pid);
@@ -6632,7 +6632,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	task_rq_unlock(rq, &flags);
 
 out_unlock:
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	put_online_cpus();
 
 	return retval;
@@ -6876,7 +6876,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 		return -EINVAL;
 
 	retval = -ESRCH;
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	p = find_process_by_pid(pid);
 	if (!p)
 		goto out_unlock;
@@ -6889,13 +6889,13 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 	time_slice = p->sched_class->get_rr_interval(rq, p);
 	task_rq_unlock(rq, &flags);
 
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	jiffies_to_timespec(time_slice, &t);
 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
 	return retval;
 
 out_unlock:
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return retval;
 }
 
@@ -6907,23 +6907,23 @@ void sched_show_task(struct task_struct *p)
 	unsigned state;
 
 	state = p->state ? __ffs(p->state) + 1 : 0;
-	printk(KERN_INFO "%-13.13s %c", p->comm,
+	pr_info("%-13.13s %c", p->comm,
 		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
 	if (state == TASK_RUNNING)
-		printk(KERN_CONT " running ");
+		pr_cont(" running ");
 	else
-		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
+		pr_cont(" %08lx ", thread_saved_pc(p));
 #else
 	if (state == TASK_RUNNING)
-		printk(KERN_CONT " running task ");
+		pr_cont(" running task ");
 	else
-		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
+		pr_cont(" %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	free = stack_not_used(p);
 #endif
-	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
+	pr_cont("%5lu %5d %6d 0x%08lx\n", free,
 		task_pid_nr(p), task_pid_nr(p->real_parent),
 		(unsigned long)task_thread_info(p)->flags);
 
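
(sched_show_task() assembles one logical line from several calls; KERN_CONT/pr_cont() marks a continuation of the previous message rather than a new record. Sketch only, with placeholder values:)

	pr_info("%-13.13s %c", "comm", 'R');			/* starts the line at KERN_INFO */
	pr_cont(" running task ");				/* appended to the same line */
	pr_cont("%5lu %5d %6d 0x%08lx\n", 0UL, 1, 0, 0UL);	/* terminates it */
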
@@ -6935,11 +6935,9 @@ void show_state_filter(unsigned long state_filter)
 	struct task_struct *g, *p;
 
 #if BITS_PER_LONG == 32
-	printk(KERN_INFO
-		" task PC stack pid father\n");
+	pr_info(" task PC stack pid father\n");
 #else
-	printk(KERN_INFO
-		" task PC stack pid father\n");
+	pr_info(" task PC stack pid father\n");
 #endif
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
@@ -7297,9 +7295,8 @@ again:
 	 * leave kernel.
 	 */
 	if (p->mm && printk_ratelimit()) {
-		printk(KERN_INFO "process %d (%s) no "
-			"longer affine to cpu%d\n",
-			task_pid_nr(p), p->comm, dead_cpu);
+		pr_info("process %d (%s) no longer affine to cpu%d\n",
+			task_pid_nr(p), p->comm, dead_cpu);
 	}
 }
 
@@ -7806,48 +7803,44 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
 
 	if (!(sd->flags & SD_LOAD_BALANCE)) {
-		printk("does not load-balance\n");
+		pr_cont("does not load-balance\n");
 		if (sd->parent)
-			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
-				" has parent");
+			pr_err("ERROR: !SD_LOAD_BALANCE domain has parent\n");
 		return -1;
 	}
 
-	printk(KERN_CONT "span %s level %s\n", str, sd->name);
+	pr_cont("span %s level %s\n", str, sd->name);
 
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-		printk(KERN_ERR "ERROR: domain->span does not contain "
-			"CPU%d\n", cpu);
+		pr_err("ERROR: domain->span does not contain CPU%d\n", cpu);
 	}
 	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
-		printk(KERN_ERR "ERROR: domain->groups does not contain"
-			" CPU%d\n", cpu);
+		pr_err("ERROR: domain->groups does not contain CPU%d\n", cpu);
 	}
 
 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
 	do {
 		if (!group) {
-			printk("\n");
-			printk(KERN_ERR "ERROR: group is NULL\n");
+			pr_cont("\n");
+			pr_err("ERROR: group is NULL\n");
 			break;
 		}
 
 		if (!group->cpu_power) {
-			printk(KERN_CONT "\n");
-			printk(KERN_ERR "ERROR: domain->cpu_power not "
-				"set\n");
+			pr_cont("\n");
+			pr_err("ERROR: domain->cpu_power not set\n");
 			break;
 		}
 
 		if (!cpumask_weight(sched_group_cpus(group))) {
-			printk(KERN_CONT "\n");
-			printk(KERN_ERR "ERROR: empty group\n");
+			pr_cont("\n");
+			pr_err("ERROR: empty group\n");
 			break;
 		}
 
 		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
-			printk(KERN_CONT "\n");
-			printk(KERN_ERR "ERROR: repeated CPUs\n");
+			pr_cont("\n");
+			pr_err("ERROR: repeated CPUs\n");
 			break;
 		}
 
@@ -7855,23 +7848,21 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
-		printk(KERN_CONT " %s", str);
+		pr_cont(" %s", str);
 		if (group->cpu_power != SCHED_LOAD_SCALE) {
-			printk(KERN_CONT " (cpu_power = %d)",
-				group->cpu_power);
+			pr_cont(" (cpu_power = %d)", group->cpu_power);
 		}
 
 		group = group->next;
 	} while (group != sd->groups);
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 
 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
-		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+		pr_err("ERROR: groups don't span domain->span\n");
 
 	if (sd->parent &&
 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
-		printk(KERN_ERR "ERROR: parent span is not a superset "
-			"of domain->span\n");
+		pr_err("ERROR: parent span is not a superset of domain->span\n");
 	return 0;
 }
 
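
(Besides the printk to pr_<level>() conversion, several hunks in sched_domain_debug_one() also fold user-visible strings that were split across source lines into a single string, which keeps the messages greppable. Illustration taken from the hunk above:)

	/* before: grepping for the full message finds nothing */
	printk(KERN_ERR "ERROR: parent span is not a superset "
		"of domain->span\n");

	/* after: one string, one call */
	pr_err("ERROR: parent span is not a superset of domain->span\n");
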
@@ -8427,8 +8418,7 @@ static int build_numa_sched_groups(struct s_data *d,
 	sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
 			  GFP_KERNEL, num);
 	if (!sg) {
-		printk(KERN_WARNING "Can not alloc domain group for node %d\n",
-			num);
+		pr_warning("Can not alloc domain group for node %d\n", num);
 		return -ENOMEM;
 	}
 	d->sched_group_nodes[num] = sg;
@@ -8457,8 +8447,8 @@ static int build_numa_sched_groups(struct s_data *d,
 		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
 				  GFP_KERNEL, num);
 		if (!sg) {
-			printk(KERN_WARNING
-				"Can not alloc domain group for node %d\n", j);
+			pr_warning("Can not alloc domain group for node %d\n",
+				   j);
 			return -ENOMEM;
 		}
 		sg->cpu_power = 0;
@@ -8686,7 +8676,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 	d->sched_group_nodes = kcalloc(nr_node_ids,
 				       sizeof(struct sched_group *), GFP_KERNEL);
 	if (!d->sched_group_nodes) {
-		printk(KERN_WARNING "Can not alloc sched group node list\n");
+		pr_warning("Can not alloc sched group node list\n");
 		return sa_notcovered;
 	}
 	sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
@@ -8703,7 +8693,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 		return sa_send_covered;
 	d->rd = alloc_rootdomain();
 	if (!d->rd) {
-		printk(KERN_WARNING "Cannot alloc root domain\n");
+		pr_warning("Cannot alloc root domain\n");
 		return sa_tmpmask;
 	}
 	return sa_rootdomain;
@@ -9685,13 +9675,11 @@ void __might_sleep(char *file, int line, int preempt_offset)
 		return;
 	prev_jiffy = jiffies;
 
-	printk(KERN_ERR
-		"BUG: sleeping function called from invalid context at %s:%d\n",
-			file, line);
-	printk(KERN_ERR
-		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-			in_atomic(), irqs_disabled(),
-			current->pid, current->comm);
+	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
+		file, line);
+	pr_err("in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+		in_atomic(), irqs_disabled(),
+		current->pid, current->comm);
 
 	debug_show_held_locks(current);
 	if (irqs_disabled())
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 479ce5682d7c..5b496132c28a 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -236,6 +236,18 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
+unsigned long long cpu_clock(int cpu)
+{
+	unsigned long long clock;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	clock = sched_clock_cpu(cpu);
+	local_irq_restore(flags);
+
+	return clock;
+}
+
 #else	/* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 void sched_clock_init(void)
@@ -251,17 +263,12 @@ u64 sched_clock_cpu(int cpu)
 	return sched_clock();
 }
 
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {
-	unsigned long long clock;
-	unsigned long flags;
+	return sched_clock_cpu(cpu);
+}
 
-	local_irq_save(flags);
-	clock = sched_clock_cpu(cpu);
-	local_irq_restore(flags);
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
-	return clock;
-}
 EXPORT_SYMBOL_GPL(cpu_clock);
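
(Net effect of the two sched_clock.c hunks: cpu_clock() moves inside the CONFIG_HAVE_UNSTABLE_SCHED_CLOCK block, so the unstable-clock build keeps the IRQ-disabled wrapper around sched_clock_cpu() while the stable-clock build becomes a plain call. Resulting shape, assembled from the hunks above:)

	#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	unsigned long long cpu_clock(int cpu)
	{
		unsigned long long clock;
		unsigned long flags;

		local_irq_save(flags);		/* keep the per-cpu clock read/update atomic */
		clock = sched_clock_cpu(cpu);
		local_irq_restore(flags);

		return clock;
	}
	#else
	unsigned long long cpu_clock(int cpu)
	{
		return sched_clock_cpu(cpu);	/* stable sched_clock: no IRQ handling needed */
	}
	#endif

	EXPORT_SYMBOL_GPL(cpu_clock);
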
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 5f93b570d383..21b969a28725 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -35,7 +35,7 @@ static void
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
 {
 	raw_spin_unlock_irq(&rq->lock);
-	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
+	pr_err("bad: scheduling from the idle thread!\n");
 	dump_stack();
 	raw_spin_lock_irq(&rq->lock);
 }