about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2007-10-15 11:00:18 -0400
committerIngo Molnar <mingo@elte.hu>2007-10-15 11:00:18 -0400
commit178be793485d70d871a0fd46b29e9e3e7da636ad (patch)
treed7542c2e06e649197d4914e7bfe0ad31e072d58c /kernel
parent1666703af948ae87c87c2bc7121aa34271cc52ab (diff)
sched: do not normalize kernel threads via SysRq-N
do not normalize kernel threads via SysRq-N: the migration threads, softlockup threads, etc. might be essential for the system to function properly. So only zap user tasks. pointed out by Andi Kleen. Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index fc61b1fc67d5..791dd08c692f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -365,15 +365,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-static inline int is_migration_thread(struct task_struct *p, struct rq *rq)
-{
-#ifdef CONFIG_SMP
-	return p == rq->migration_thread;
-#else
-	return 0;
-#endif
-}
-
 /*
  * Update the per-runqueue clock, as finegrained as the platform can give
  * us, but without assuming monotonicity, etc.:
@@ -6563,6 +6554,12 @@ void normalize_rt_tasks(void)
 
 	read_lock_irq(&tasklist_lock);
 	do_each_thread(g, p) {
+		/*
+		 * Only normalize user tasks:
+		 */
+		if (!p->mm)
+			continue;
+
 		p->se.exec_start		= 0;
 #ifdef CONFIG_SCHEDSTATS
 		p->se.wait_start		= 0;
@@ -6584,8 +6581,7 @@ void normalize_rt_tasks(void)
 		spin_lock_irqsave(&p->pi_lock, flags);
 		rq = __task_rq_lock(p);
 
-		if (!is_migration_thread(p, rq))
-			normalize_task(rq, p);
+		normalize_task(rq, p);
 
 		__task_rq_unlock(rq);
 		spin_unlock_irqrestore(&p->pi_lock, flags);