aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
author    Andi Kleen <ak@suse.de>        2007-10-15 11:00:15 -0400
committer Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:15 -0400
commit    3a5e4dc12f23fb96fafd4f5d0f61e6c3070f80a5 (patch)
tree      e7c0246126f7cf169cdd167555a1db209d5b03ef /kernel/sched.c
parent    8cbbe86dfcfd68ad69916164bdc838d9e09adca8 (diff)
sched: cleanup: refactor normalize_rt_tasks
Replace a particularly ugly ifdef with an inline and a new macro. Also split up the function to be easier to read.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 43
1 file changed, 23 insertions(+), 20 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index db88b5655aca..2c6295b395a9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -75,6 +75,12 @@ unsigned long long __attribute__((weak)) sched_clock(void)
75 return (unsigned long long)jiffies * (1000000000 / HZ); 75 return (unsigned long long)jiffies * (1000000000 / HZ);
76} 76}
77 77
78#ifdef CONFIG_SMP
79#define is_migration_thread(p, rq) ((p) == (rq)->migration_thread)
80#else
81#define is_migration_thread(p, rq) 0
82#endif
83
78/* 84/*
79 * Convert user-nice values [ -20 ... 0 ... 19 ] 85 * Convert user-nice values [ -20 ... 0 ... 19 ]
80 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], 86 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -6532,12 +6538,25 @@ EXPORT_SYMBOL(__might_sleep);
6532#endif 6538#endif
6533 6539
6534#ifdef CONFIG_MAGIC_SYSRQ 6540#ifdef CONFIG_MAGIC_SYSRQ
6541static void normalize_task(struct rq *rq, struct task_struct *p)
6542{
6543 int on_rq;
6544 update_rq_clock(rq);
6545 on_rq = p->se.on_rq;
6546 if (on_rq)
6547 deactivate_task(rq, p, 0);
6548 __setscheduler(rq, p, SCHED_NORMAL, 0);
6549 if (on_rq) {
6550 activate_task(rq, p, 0);
6551 resched_task(rq->curr);
6552 }
6553}
6554
6535void normalize_rt_tasks(void) 6555void normalize_rt_tasks(void)
6536{ 6556{
6537 struct task_struct *g, *p; 6557 struct task_struct *g, *p;
6538 unsigned long flags; 6558 unsigned long flags;
6539 struct rq *rq; 6559 struct rq *rq;
6540 int on_rq;
6541 6560
6542 read_lock_irq(&tasklist_lock); 6561 read_lock_irq(&tasklist_lock);
6543 do_each_thread(g, p) { 6562 do_each_thread(g, p) {
@@ -6561,26 +6580,10 @@ void normalize_rt_tasks(void)
6561 6580
6562 spin_lock_irqsave(&p->pi_lock, flags); 6581 spin_lock_irqsave(&p->pi_lock, flags);
6563 rq = __task_rq_lock(p); 6582 rq = __task_rq_lock(p);
6564#ifdef CONFIG_SMP
6565 /*
6566 * Do not touch the migration thread:
6567 */
6568 if (p == rq->migration_thread)
6569 goto out_unlock;
6570#endif
6571 6583
6572 update_rq_clock(rq); 6584 if (!is_migration_thread(p, rq))
6573 on_rq = p->se.on_rq; 6585 normalize_task(rq, p);
6574 if (on_rq) 6586
6575 deactivate_task(rq, p, 0);
6576 __setscheduler(rq, p, SCHED_NORMAL, 0);
6577 if (on_rq) {
6578 activate_task(rq, p, 0);
6579 resched_task(rq->curr);
6580 }
6581#ifdef CONFIG_SMP
6582 out_unlock:
6583#endif
6584 __task_rq_unlock(rq); 6587 __task_rq_unlock(rq);
6585 spin_unlock_irqrestore(&p->pi_lock, flags); 6588 spin_unlock_irqrestore(&p->pi_lock, flags);
6586 } while_each_thread(g, p); 6589 } while_each_thread(g, p);