path: root/kernel/sched.c
author	Thomas Gleixner <tglx@linutronix.de>	2009-11-17 08:28:38 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 17:55:33 -0500
commit	05fa785cf80c9b7c0254c3056037147aed3ea16b (patch)
tree	3d5c69d449b9240dc6d1005dddf344e467de4f34 /kernel/sched.c
parent	a26724591edba5acc528d41f3906a972590e8f54 (diff)
sched: Convert rq->lock to raw_spinlock
Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
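Background for the conversion: on a PREEMPT_RT kernel a plain spinlock_t is substituted by a sleeping lock, while raw_spinlock_t keeps the traditional non-sleeping, IRQ-safe spinning behaviour. The runqueue lock is taken from the scheduler core and from hard interrupt context, so it must stay raw. A minimal usage sketch of the pattern (illustrative only, with made-up names, not code from this patch):

#include <linux/spinlock.h>

/* Hypothetical example structure; the real patch converts struct rq. */
struct example_rq {
	raw_spinlock_t lock;	/* must never sleep, even on PREEMPT_RT */
};

static struct example_rq ex_rq;

static void example_init(void)
{
	raw_spin_lock_init(&ex_rq.lock);	/* replaces spin_lock_init() */
}

static void example_update(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables interrupts and busy-waits;
	 * PREEMPT_RT never turns it into a sleeping rtmutex.
	 */
	raw_spin_lock_irqsave(&ex_rq.lock, flags);
	/* ... touch state protected by the lock ... */
	raw_spin_unlock_irqrestore(&ex_rq.lock, flags);
}

The bulk of the diff below is the mechanical spin_* to raw_spin_* substitution that follows from this type change.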
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  183
1 file changed, 93 insertions, 90 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 91c65dd91435..3dbe6178ebfd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain;
  */
 struct rq {
 	/* runqueue lock: */
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
  */
 int runqueue_is_locked(int cpu)
 {
-	return spin_is_locked(&cpu_rq(cpu)->lock);
+	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
@@ -884,7 +884,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
-	rq->lock.rlock.owner = current;
+	rq->lock.owner = current;
 #endif
 	/*
 	 * If we are tracking spinlock dependencies then we have to
@@ -893,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 
-	spin_unlock_irq(&rq->lock);
+	raw_spin_unlock_irq(&rq->lock);
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -917,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	next->oncpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	spin_unlock_irq(&rq->lock);
+	raw_spin_unlock_irq(&rq->lock);
 #else
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 #endif
 }
 
@@ -949,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 {
 	for (;;) {
 		struct rq *rq = task_rq(p);
-		spin_lock(&rq->lock);
+		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p)))
 			return rq;
-		spin_unlock(&rq->lock);
+		raw_spin_unlock(&rq->lock);
 	}
 }
 
@@ -969,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	for (;;) {
 		local_irq_save(*flags);
 		rq = task_rq(p);
-		spin_lock(&rq->lock);
+		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p)))
 			return rq;
-		spin_unlock_irqrestore(&rq->lock, *flags);
+		raw_spin_unlock_irqrestore(&rq->lock, *flags);
 	}
 }
 
@@ -981,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
 	struct rq *rq = task_rq(p);
 
 	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-	spin_unlock_wait(&rq->lock);
+	raw_spin_unlock_wait(&rq->lock);
 }
 
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 }
 
 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
 	__releases(rq->lock)
 {
-	spin_unlock_irqrestore(&rq->lock, *flags);
+	raw_spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
@@ -1006,7 +1006,7 @@ static struct rq *this_rq_lock(void)
 
 	local_irq_disable();
 	rq = this_rq();
-	spin_lock(&rq->lock);
+	raw_spin_lock(&rq->lock);
 
 	return rq;
 }
@@ -1053,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
-	spin_lock(&rq->lock);
+	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 
 	return HRTIMER_NORESTART;
 }
@@ -1069,10 +1069,10 @@ static void __hrtick_start(void *arg)
 {
 	struct rq *rq = arg;
 
-	spin_lock(&rq->lock);
+	raw_spin_lock(&rq->lock);
 	hrtimer_restart(&rq->hrtick_timer);
 	rq->hrtick_csd_pending = 0;
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -1179,7 +1179,7 @@ static void resched_task(struct task_struct *p)
 {
 	int cpu;
 
-	assert_spin_locked(&task_rq(p)->lock);
+	assert_raw_spin_locked(&task_rq(p)->lock);
 
 	if (test_tsk_need_resched(p))
 		return;
@@ -1201,10 +1201,10 @@ static void resched_cpu(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&rq->lock, flags))
+	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
 		return;
 	resched_task(cpu_curr(cpu));
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -1273,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 #else /* !CONFIG_SMP */
 static void resched_task(struct task_struct *p)
 {
-	assert_spin_locked(&task_rq(p)->lock);
+	assert_raw_spin_locked(&task_rq(p)->lock);
 	set_tsk_need_resched(p);
 }
 
@@ -1600,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 		struct rq *rq = cpu_rq(cpu);
 		unsigned long flags;
 
-		spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock_irqsave(&rq->lock, flags);
 		tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
 		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
 		__set_se_shares(tg->se[cpu], shares);
-		spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 }
 
@@ -1706,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 	if (root_task_group_empty())
 		return;
 
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 	update_shares(sd);
-	spin_lock(&rq->lock);
+	raw_spin_lock(&rq->lock);
 }
 
 static void update_h_load(long cpu)
@@ -1748,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
-	spin_unlock(&this_rq->lock);
+	raw_spin_unlock(&this_rq->lock);
 	double_rq_lock(this_rq, busiest);
 
 	return 1;
@@ -1769,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
 	int ret = 0;
 
-	if (unlikely(!spin_trylock(&busiest->lock))) {
+	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
 		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_unlock(&this_rq->lock);
+			raw_spin_lock(&busiest->lock);
+			raw_spin_lock_nested(&this_rq->lock,
+					      SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock_nested(&busiest->lock,
+					      SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
@@ -1790,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
 	if (unlikely(!irqs_disabled())) {
 		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
+		raw_spin_unlock(&this_rq->lock);
 		BUG_ON(1);
 	}
 
@@ -1800,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {
-	spin_unlock(&busiest->lock);
+	raw_spin_unlock(&busiest->lock);
 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 #endif
@@ -2023,13 +2025,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 		return;
 	}
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
 	set_task_cpu(p, cpu);
 	p->cpus_allowed = cpumask_of_cpu(cpu);
 	p->rt.nr_cpus_allowed = 1;
 	p->flags |= PF_THREAD_BOUND;
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -2781,10 +2783,10 @@ static inline void post_schedule(struct rq *rq)
 	if (rq->post_schedule) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock_irqsave(&rq->lock, flags);
 		if (rq->curr->sched_class->post_schedule)
 			rq->curr->sched_class->post_schedule(rq);
-		spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 		rq->post_schedule = 0;
 	}
@@ -3066,15 +3068,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 {
 	BUG_ON(!irqs_disabled());
 	if (rq1 == rq2) {
-		spin_lock(&rq1->lock);
+		raw_spin_lock(&rq1->lock);
 		__acquire(rq2->lock);	/* Fake it out ;) */
 	} else {
 		if (rq1 < rq2) {
-			spin_lock(&rq1->lock);
-			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock(&rq1->lock);
+			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
-			spin_lock(&rq2->lock);
-			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock(&rq2->lock);
+			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -3091,9 +3093,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
-	spin_unlock(&rq1->lock);
+	raw_spin_unlock(&rq1->lock);
 	if (rq1 != rq2)
-		spin_unlock(&rq2->lock);
+		raw_spin_unlock(&rq2->lock);
 	else
 		__release(rq2->lock);
 }
@@ -4186,14 +4188,15 @@ redo:
 
 	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-		spin_lock_irqsave(&busiest->lock, flags);
+		raw_spin_lock_irqsave(&busiest->lock, flags);
 
 		/* don't kick the migration_thread, if the curr
 		 * task on busiest cpu can't be moved to this_cpu
 		 */
 		if (!cpumask_test_cpu(this_cpu,
 				      &busiest->curr->cpus_allowed)) {
-			spin_unlock_irqrestore(&busiest->lock, flags);
+			raw_spin_unlock_irqrestore(&busiest->lock,
+						    flags);
 			all_pinned = 1;
 			goto out_one_pinned;
 		}
@@ -4203,7 +4206,7 @@ redo:
 			busiest->push_cpu = this_cpu;
 			active_balance = 1;
 		}
-		spin_unlock_irqrestore(&busiest->lock, flags);
+		raw_spin_unlock_irqrestore(&busiest->lock, flags);
 		if (active_balance)
 			wake_up_process(busiest->migration_thread);
 
@@ -4385,10 +4388,10 @@ redo:
 		/*
 		 * Should not call ttwu while holding a rq->lock
 		 */
-		spin_unlock(&this_rq->lock);
+		raw_spin_unlock(&this_rq->lock);
 		if (active_balance)
 			wake_up_process(busiest->migration_thread);
-		spin_lock(&this_rq->lock);
+		raw_spin_lock(&this_rq->lock);
 
 	} else
 		sd->nr_balance_failed = 0;
@@ -5257,11 +5260,11 @@ void scheduler_tick(void)
 
 	sched_clock_tick();
 
-	spin_lock(&rq->lock);
+	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 
 	perf_event_task_tick(curr, cpu);
 
@@ -5455,7 +5458,7 @@ need_resched_nonpreemptible:
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
-	spin_lock_irq(&rq->lock);
+	raw_spin_lock_irq(&rq->lock);
 	update_rq_clock(rq);
 	clear_tsk_need_resched(prev);
 
@@ -5491,7 +5494,7 @@ need_resched_nonpreemptible:
 		cpu = smp_processor_id();
 		rq = cpu_rq(cpu);
 	} else
-		spin_unlock_irq(&rq->lock);
+		raw_spin_unlock_irq(&rq->lock);
 
 	post_schedule(rq);
 
@@ -6980,7 +6983,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
@@ -6992,7 +6995,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
 #endif
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
 #if defined(CONFIG_PREEMPT)
@@ -7209,10 +7212,10 @@ static int migration_thread(void *data)
 		struct migration_req *req;
 		struct list_head *head;
 
-		spin_lock_irq(&rq->lock);
+		raw_spin_lock_irq(&rq->lock);
 
 		if (cpu_is_offline(cpu)) {
-			spin_unlock_irq(&rq->lock);
+			raw_spin_unlock_irq(&rq->lock);
 			break;
 		}
 
@@ -7224,7 +7227,7 @@ static int migration_thread(void *data)
 		head = &rq->migration_queue;
 
 		if (list_empty(head)) {
-			spin_unlock_irq(&rq->lock);
+			raw_spin_unlock_irq(&rq->lock);
 			schedule();
 			set_current_state(TASK_INTERRUPTIBLE);
 			continue;
@@ -7233,14 +7236,14 @@ static int migration_thread(void *data)
 		list_del_init(head->next);
 
 		if (req->task != NULL) {
-			spin_unlock(&rq->lock);
+			raw_spin_unlock(&rq->lock);
 			__migrate_task(req->task, cpu, req->dest_cpu);
 		} else if (likely(cpu == (badcpu = smp_processor_id()))) {
 			req->dest_cpu = RCU_MIGRATION_GOT_QS;
-			spin_unlock(&rq->lock);
+			raw_spin_unlock(&rq->lock);
 		} else {
 			req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
-			spin_unlock(&rq->lock);
+			raw_spin_unlock(&rq->lock);
 			WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
 		}
 		local_irq_enable();
@@ -7363,14 +7366,14 @@ void sched_idle_next(void)
 	 * Strictly not necessary since rest of the CPUs are stopped by now
 	 * and interrupts disabled on the current cpu.
 	 */
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
 	update_rq_clock(rq);
 	activate_task(rq, p, 0);
 
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
@@ -7406,9 +7409,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 	 * that's OK. No task can be added to this CPU, so iteration is
 	 * fine.
 	 */
-	spin_unlock_irq(&rq->lock);
+	raw_spin_unlock_irq(&rq->lock);
 	move_task_off_dead_cpu(dead_cpu, p);
-	spin_lock_irq(&rq->lock);
+	raw_spin_lock_irq(&rq->lock);
 
 	put_task_struct(p);
 }
@@ -7674,13 +7677,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
-		spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
 			set_rq_online(rq);
 		}
-		spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -7705,13 +7708,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		put_task_struct(rq->migration_thread);
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
-		spin_lock_irq(&rq->lock);
+		raw_spin_lock_irq(&rq->lock);
 		update_rq_clock(rq);
 		deactivate_task(rq, rq->idle, 0);
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
-		spin_unlock_irq(&rq->lock);
+		raw_spin_unlock_irq(&rq->lock);
 		cpuset_unlock();
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
@@ -7721,30 +7724,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		 * they didn't take sched_hotcpu_mutex. Just wake up
 		 * the requestors.
 		 */
-		spin_lock_irq(&rq->lock);
+		raw_spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
 			struct migration_req *req;
 
 			req = list_entry(rq->migration_queue.next,
 					 struct migration_req, list);
 			list_del_init(&req->list);
-			spin_unlock_irq(&rq->lock);
+			raw_spin_unlock_irq(&rq->lock);
 			complete(&req->done);
-			spin_lock_irq(&rq->lock);
+			raw_spin_lock_irq(&rq->lock);
 		}
-		spin_unlock_irq(&rq->lock);
+		raw_spin_unlock_irq(&rq->lock);
 		break;
 
 	case CPU_DYING:
 	case CPU_DYING_FROZEN:
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
-		spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
 		}
-		spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 		break;
 #endif
 	}
@@ -7974,7 +7977,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	struct root_domain *old_rd = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	if (rq->rd) {
 		old_rd = rq->rd;
@@ -8000,7 +8003,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
 		set_rq_online(rq);
 
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	if (old_rd)
 		free_rootdomain(old_rd);
@@ -9357,7 +9360,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
-	plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+	plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
 #endif
 
 	rt_rq->rt_time = 0;
@@ -9523,7 +9526,7 @@ void __init sched_init(void)
 		struct rq *rq;
 
 		rq = cpu_rq(i);
-		spin_lock_init(&rq->lock);
+		raw_spin_lock_init(&rq->lock);
 		rq->nr_running = 0;
 		rq->calc_load_active = 0;
 		rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -10115,9 +10118,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
 	struct rq *rq = cfs_rq->rq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	__set_se_shares(se, shares);
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static DEFINE_MUTEX(shares_mutex);
@@ -10717,9 +10720,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 	/*
 	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
 	 */
-	spin_lock_irq(&cpu_rq(cpu)->lock);
+	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
 	data = *cpuusage;
-	spin_unlock_irq(&cpu_rq(cpu)->lock);
+	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
 	data = *cpuusage;
 #endif
@@ -10735,9 +10738,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 	/*
 	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
 	 */
-	spin_lock_irq(&cpu_rq(cpu)->lock);
+	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
 	*cpuusage = val;
-	spin_unlock_irq(&cpu_rq(cpu)->lock);
+	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
 	*cpuusage = val;
 #endif
@@ -10971,9 +10974,9 @@ void synchronize_sched_expedited(void)
 		init_completion(&req->done);
 		req->task = NULL;
 		req->dest_cpu = RCU_MIGRATION_NEED_QS;
-		spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock_irqsave(&rq->lock, flags);
 		list_add(&req->list, &rq->migration_queue);
-		spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 		wake_up_process(rq->migration_thread);
 	}
 	for_each_online_cpu(cpu) {
@@ -10981,11 +10984,11 @@ void synchronize_sched_expedited(void)
 		req = &per_cpu(rcu_migration_req, cpu);
 		rq = cpu_rq(cpu);
 		wait_for_completion(&req->done);
-		spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock_irqsave(&rq->lock, flags);
 		if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
 			need_full_sync = 1;
 		req->dest_cpu = RCU_MIGRATION_IDLE;
-		spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 	rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
 	synchronize_sched_expedited_count++;