about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c17
1 files changed, 7 insertions, 10 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b47819b676fa..d714611f1691 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3384,7 +3384,7 @@ EXPORT_SYMBOL(schedule);
3384 3384
3385#ifdef CONFIG_PREEMPT 3385#ifdef CONFIG_PREEMPT
3386/* 3386/*
3387 * this is is the entry point to schedule() from in-kernel preemption 3387 * this is the entry point to schedule() from in-kernel preemption
3388 * off of preempt_enable. Kernel preemptions off return from interrupt 3388 * off of preempt_enable. Kernel preemptions off return from interrupt
3389 * occur there and call schedule directly. 3389 * occur there and call schedule directly.
3390 */ 3390 */
@@ -3427,7 +3427,7 @@ need_resched:
3427EXPORT_SYMBOL(preempt_schedule); 3427EXPORT_SYMBOL(preempt_schedule);
3428 3428
3429/* 3429/*
3430 * this is is the entry point to schedule() from kernel preemption 3430 * this is the entry point to schedule() from kernel preemption
3431 * off of irq context. 3431 * off of irq context.
3432 * Note, that this is called and return with irqs disabled. This will 3432 * Note, that this is called and return with irqs disabled. This will
3433 * protect us against recursive calling from irq. 3433 * protect us against recursive calling from irq.
@@ -3439,7 +3439,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
3439 struct task_struct *task = current; 3439 struct task_struct *task = current;
3440 int saved_lock_depth; 3440 int saved_lock_depth;
3441#endif 3441#endif
3442 /* Catch callers which need to be fixed*/ 3442 /* Catch callers which need to be fixed */
3443 BUG_ON(ti->preempt_count || !irqs_disabled()); 3443 BUG_ON(ti->preempt_count || !irqs_disabled());
3444 3444
3445need_resched: 3445need_resched:
@@ -4650,7 +4650,7 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
4650 return list_entry(p->sibling.next,struct task_struct,sibling); 4650 return list_entry(p->sibling.next,struct task_struct,sibling);
4651} 4651}
4652 4652
4653static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; 4653static const char stat_nam[] = "RSDTtZX";
4654 4654
4655static void show_task(struct task_struct *p) 4655static void show_task(struct task_struct *p)
4656{ 4656{
@@ -4658,12 +4658,9 @@ static void show_task(struct task_struct *p)
4658 unsigned long free = 0; 4658 unsigned long free = 0;
4659 unsigned state; 4659 unsigned state;
4660 4660
4661 printk("%-13.13s ", p->comm);
4662 state = p->state ? __ffs(p->state) + 1 : 0; 4661 state = p->state ? __ffs(p->state) + 1 : 0;
4663 if (state < ARRAY_SIZE(stat_nam)) 4662 printk("%-13.13s %c", p->comm,
4664 printk(stat_nam[state]); 4663 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4665 else
4666 printk("?");
4667#if (BITS_PER_LONG == 32) 4664#if (BITS_PER_LONG == 32)
4668 if (state == TASK_RUNNING) 4665 if (state == TASK_RUNNING)
4669 printk(" running "); 4666 printk(" running ");
@@ -5776,7 +5773,7 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
5776 cache = vmalloc(max_size); 5773 cache = vmalloc(max_size);
5777 if (!cache) { 5774 if (!cache) {
5778 printk("could not vmalloc %d bytes for cache!\n", 2*max_size); 5775 printk("could not vmalloc %d bytes for cache!\n", 2*max_size);
5779 return 1000000; // return 1 msec on very small boxen 5776 return 1000000; /* return 1 msec on very small boxen */
5780 } 5777 }
5781 5778
5782 while (size <= max_size) { 5779 while (size <= max_size) {