author    Thomas Gleixner <tglx@linutronix.de>  2009-11-17 08:54:03 -0500
committer Thomas Gleixner <tglx@linutronix.de>  2009-12-14 17:55:33 -0500
commit    1d615482547584b9a8bb6316a58fed6ce90dd9ff (patch)
tree      21dae4b70acb3ce0bdaeeaee1dbd970be41d26e2 /kernel/sched.c
parent    fe841226bd954fba4fd79f037a876053fe9c3217 (diff)
sched: Convert pi_lock to raw_spinlock
Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
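For context, a minimal standalone sketch (not part of the patch) of the raw_spinlock pattern the commit moves pi_lock to. The struct and function names below are hypothetical and exist only for illustration:

/*
 * Illustrative sketch only -- not from the patch. It shows the raw_spinlock
 * API the commit converts pi_lock to: a raw_spinlock_t always spins (and
 * disables preemption), even on preempt-rt where a plain spinlock_t may be
 * turned into a sleeping lock. "struct pi_demo" and its helpers are
 * made-up names used just for this example.
 */
#include <linux/spinlock.h>

struct pi_demo {
	raw_spinlock_t lock;	/* must not sleep: taken in atomic context */
	int prio;
};

static void pi_demo_init(struct pi_demo *d)
{
	raw_spin_lock_init(&d->lock);
	d->prio = 0;
}

static void pi_demo_set_prio(struct pi_demo *d, int prio)
{
	unsigned long flags;

	/* raw_ variants map to the low-level arch spinlock on all configs */
	raw_spin_lock_irqsave(&d->lock, flags);
	d->prio = prio;
	raw_spin_unlock_irqrestore(&d->lock, flags);
}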
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 01c5016e57f1..18cceeecce35 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6323,7 +6323,7 @@ recheck:
 	 * make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
 	 */
-	spin_lock_irqsave(&p->pi_lock, flags);
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	/*
 	 * To be able to change p->policy safely, the apropriate
 	 * runqueue lock must be held.
@@ -6333,7 +6333,7 @@ recheck:
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
 		__task_rq_unlock(rq);
-		spin_unlock_irqrestore(&p->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
 	update_rq_clock(rq);
@@ -6357,7 +6357,7 @@ recheck:
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
 	__task_rq_unlock(rq);
-	spin_unlock_irqrestore(&p->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 	rt_mutex_adjust_pi(p);
 
@@ -9624,7 +9624,7 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+	plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
 #endif
 
 	/*
@@ -9749,13 +9749,13 @@ void normalize_rt_tasks(void)
 			continue;
 		}
 
-		spin_lock(&p->pi_lock);
+		raw_spin_lock(&p->pi_lock);
 		rq = __task_rq_lock(p);
 
 		normalize_task(rq, p);
 
 		__task_rq_unlock(rq);
-		spin_unlock(&p->pi_lock);
+		raw_spin_unlock(&p->pi_lock);
 	} while_each_thread(g, p);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);