aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2014-09-21 15:33:38 -0400
committerIngo Molnar <mingo@kernel.org>2014-09-24 08:47:03 -0400
commit3472eaa1f12e217e2b8b0ef658ff861b2308cbbd (patch)
treec231d968cc0a6a696ae8f3e008f979899bf52b45
parent8651c65844e93af44554272b7e0d2b142837b244 (diff)
sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock, use task_rq_lock()
1. read_lock(tasklist_lock) does not need to disable irqs.

2. ->mm != NULL is a common mistake, use PF_KTHREAD.

3. The second ->mm check can be simply removed.

4. task_rq_lock() looks better than raw_spin_lock(&p->pi_lock) + __task_rq_lock().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140921193338.GA28621@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/sched/core.c16
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0abfb7ec9e62..d65566d07fcf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7220,12 +7220,12 @@ void normalize_rt_tasks(void)
 	unsigned long flags;
 	struct rq *rq;
 
-	read_lock_irqsave(&tasklist_lock, flags);
+	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
 		/*
 		 * Only normalize user tasks:
 		 */
-		if (!p->mm)
+		if (p->flags & PF_KTHREAD)
 			continue;
 
 		p->se.exec_start = 0;
@@ -7240,20 +7240,16 @@ void normalize_rt_tasks(void)
 		 * Renice negative nice level userspace
 		 * tasks back to 0:
 		 */
-		if (task_nice(p) < 0 && p->mm)
+		if (task_nice(p) < 0)
 			set_user_nice(p, 0);
 		continue;
 	}
 
-		raw_spin_lock(&p->pi_lock);
-		rq = __task_rq_lock(p);
-
+		rq = task_rq_lock(p, &flags);
 		normalize_task(rq, p);
-
-		__task_rq_unlock(rq);
-		raw_spin_unlock(&p->pi_lock);
+		task_rq_unlock(rq, p, &flags);
 	}
-	read_unlock_irqrestore(&tasklist_lock, flags);
+	read_unlock(&tasklist_lock);
 }
 
 #endif /* CONFIG_MAGIC_SYSRQ */