author     Ingo Molnar <mingo@elte.hu>   2009-12-16 12:33:49 -0500
committer  Ingo Molnar <mingo@elte.hu>   2009-12-16 12:33:49 -0500
commit     ee1156c11a1121e118b0a7f2dec240f0d421b1fd (patch)
tree       b8771cc5a9758af9d7410fc519227c036c222130 /kernel/sched.c
parent     b9f8fcd55bbdb037e5332dbdb7b494f0b70861ac (diff)
parent     8bea8672edfca7ec5f661cafb218f1205863b343 (diff)
Merge branch 'linus' into sched/urgent
Conflicts:
        kernel/sched_idletask.c

Merge reason: resolve the conflicts, pick up latest changes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--   kernel/sched.c | 231
 1 files changed, 117 insertions, 114 deletions
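
Every hunk below applies the same mechanical conversion: the rq and rt-bandwidth locks change type from spinlock_t to raw_spinlock_t, and each spin_lock*()/spin_unlock*() call on them becomes the matching raw_spin_lock*()/raw_spin_unlock*() call (likewise spin_lock_init(), spin_is_locked() and assert_spin_locked()). As a rough, hypothetical illustration of that pattern only — struct foo, foo_init() and foo_tick() are made-up names, not part of this patch:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical example of the conversion pattern; not from kernel/sched.c. */
struct foo {
        raw_spinlock_t lock;            /* was: spinlock_t lock; */
        u64 counter;
};

static void foo_init(struct foo *f)
{
        raw_spin_lock_init(&f->lock);   /* was: spin_lock_init(&f->lock); */
        f->counter = 0;
}

static void foo_tick(struct foo *f)
{
        unsigned long flags;

        /* was: spin_lock_irqsave() / spin_unlock_irqrestore() */
        raw_spin_lock_irqsave(&f->lock, flags);
        f->counter++;
        raw_spin_unlock_irqrestore(&f->lock, flags);
}
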
diff --git a/kernel/sched.c b/kernel/sched.c
index db5c26692dd5..9c30858b6463 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -143,7 +143,7 @@ struct rt_prio_array {

 struct rt_bandwidth {
         /* nests inside the rq lock: */
-        spinlock_t rt_runtime_lock;
+        raw_spinlock_t rt_runtime_lock;
         ktime_t rt_period;
         u64 rt_runtime;
         struct hrtimer rt_period_timer;
@@ -180,7 +180,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
         rt_b->rt_period = ns_to_ktime(period);
         rt_b->rt_runtime = runtime;

-        spin_lock_init(&rt_b->rt_runtime_lock);
+        raw_spin_lock_init(&rt_b->rt_runtime_lock);

         hrtimer_init(&rt_b->rt_period_timer,
                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -202,7 +202,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
         if (hrtimer_active(&rt_b->rt_period_timer))
                 return;

-        spin_lock(&rt_b->rt_runtime_lock);
+        raw_spin_lock(&rt_b->rt_runtime_lock);
         for (;;) {
                 unsigned long delta;
                 ktime_t soft, hard;
@@ -219,7 +219,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
                 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                 HRTIMER_MODE_ABS_PINNED, 0);
         }
-        spin_unlock(&rt_b->rt_runtime_lock);
+        raw_spin_unlock(&rt_b->rt_runtime_lock);
 }

 #ifdef CONFIG_RT_GROUP_SCHED
@@ -300,7 +300,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);

 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
@@ -472,7 +472,7 @@ struct rt_rq {
         u64 rt_time;
         u64 rt_runtime;
         /* Nests inside the rq lock: */
-        spinlock_t rt_runtime_lock;
+        raw_spinlock_t rt_runtime_lock;

 #ifdef CONFIG_RT_GROUP_SCHED
         unsigned long rt_nr_boosted;
@@ -527,7 +527,7 @@ static struct root_domain def_root_domain;
  */
 struct rq {
         /* runqueue lock: */
-        spinlock_t lock;
+        raw_spinlock_t lock;

         /*
          * nr_running and cpu_load should be in the same cacheline because
@@ -687,7 +687,7 @@ inline void update_rq_clock(struct rq *rq)
  */
 int runqueue_is_locked(int cpu)
 {
-        return spin_is_locked(&cpu_rq(cpu)->lock);
+        return raw_spin_is_locked(&cpu_rq(cpu)->lock);
 }

 /*
@@ -895,7 +895,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
          */
         spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

-        spin_unlock_irq(&rq->lock);
+        raw_spin_unlock_irq(&rq->lock);
 }

 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -919,9 +919,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
         next->oncpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        spin_unlock_irq(&rq->lock);
+        raw_spin_unlock_irq(&rq->lock);
 #else
-        spin_unlock(&rq->lock);
+        raw_spin_unlock(&rq->lock);
 #endif
 }

@@ -951,10 +951,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 {
         for (;;) {
                 struct rq *rq = task_rq(p);
-                spin_lock(&rq->lock);
+                raw_spin_lock(&rq->lock);
                 if (likely(rq == task_rq(p)))
                         return rq;
-                spin_unlock(&rq->lock);
+                raw_spin_unlock(&rq->lock);
         }
 }

@@ -971,10 +971,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
         for (;;) {
                 local_irq_save(*flags);
                 rq = task_rq(p);
-                spin_lock(&rq->lock);
+                raw_spin_lock(&rq->lock);
                 if (likely(rq == task_rq(p)))
                         return rq;
-                spin_unlock_irqrestore(&rq->lock, *flags);
+                raw_spin_unlock_irqrestore(&rq->lock, *flags);
         }
 }

@@ -983,19 +983,19 @@ void task_rq_unlock_wait(struct task_struct *p)
         struct rq *rq = task_rq(p);

         smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-        spin_unlock_wait(&rq->lock);
+        raw_spin_unlock_wait(&rq->lock);
 }

 static void __task_rq_unlock(struct rq *rq)
         __releases(rq->lock)
 {
-        spin_unlock(&rq->lock);
+        raw_spin_unlock(&rq->lock);
 }

 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
         __releases(rq->lock)
 {
-        spin_unlock_irqrestore(&rq->lock, *flags);
+        raw_spin_unlock_irqrestore(&rq->lock, *flags);
 }

 /*
@@ -1008,7 +1008,7 @@ static struct rq *this_rq_lock(void)

         local_irq_disable();
         rq = this_rq();
-        spin_lock(&rq->lock);
+        raw_spin_lock(&rq->lock);

         return rq;
 }
@@ -1055,10 +1055,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)

         WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

-        spin_lock(&rq->lock);
+        raw_spin_lock(&rq->lock);
         update_rq_clock(rq);
         rq->curr->sched_class->task_tick(rq, rq->curr, 1);
-        spin_unlock(&rq->lock);
+        raw_spin_unlock(&rq->lock);

         return HRTIMER_NORESTART;
 }
@@ -1071,10 +1071,10 @@ static void __hrtick_start(void *arg)
 {
         struct rq *rq = arg;

-        spin_lock(&rq->lock);
+        raw_spin_lock(&rq->lock);
         hrtimer_restart(&rq->hrtick_timer);
         rq->hrtick_csd_pending = 0;
-        spin_unlock(&rq->lock);
+        raw_spin_unlock(&rq->lock);
 }

 /*
@@ -1181,7 +1181,7 @@ static void resched_task(struct task_struct *p)
 {
         int cpu;

-        assert_spin_locked(&task_rq(p)->lock);
+        assert_raw_spin_locked(&task_rq(p)->lock);

         if (test_tsk_need_resched(p))
                 return;
@@ -1203,10 +1203,10 @@ static void resched_cpu(int cpu)
         struct rq *rq = cpu_rq(cpu);
         unsigned long flags;

-        if (!spin_trylock_irqsave(&rq->lock, flags))
+        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                 return;
         resched_task(cpu_curr(cpu));
-        spin_unlock_irqrestore(&rq->lock, flags);
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }

 #ifdef CONFIG_NO_HZ
@@ -1275,7 +1275,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 #else /* !CONFIG_SMP */
 static void resched_task(struct task_struct *p)
 {
-        assert_spin_locked(&task_rq(p)->lock);
+        assert_raw_spin_locked(&task_rq(p)->lock);
         set_tsk_need_resched(p);
 }

@@ -1602,11 +1602,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
                 struct rq *rq = cpu_rq(cpu);
                 unsigned long flags;

-                spin_lock_irqsave(&rq->lock, flags);
+                raw_spin_lock_irqsave(&rq->lock, flags);
                 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
                 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
                 __set_se_shares(tg->se[cpu], shares);
-                spin_unlock_irqrestore(&rq->lock, flags);
+                raw_spin_unlock_irqrestore(&rq->lock, flags);
         }
 }

@@ -1708,9 +1708,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
         if (root_task_group_empty())
                 return;

-        spin_unlock(&rq->lock);
+        raw_spin_unlock(&rq->lock);
         update_shares(sd);
-        spin_lock(&rq->lock);
+        raw_spin_lock(&rq->lock);
 }

 static void update_h_load(long cpu)
@@ -1750,7 +1750,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
         __acquires(busiest->lock)
         __acquires(this_rq->lock)
 {
-        spin_unlock(&this_rq->lock);
+        raw_spin_unlock(&this_rq->lock);
         double_rq_lock(this_rq, busiest);

         return 1;
@@ -1771,14 +1771,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
         int ret = 0;

-        if (unlikely(!spin_trylock(&busiest->lock))) {
+        if (unlikely(!raw_spin_trylock(&busiest->lock))) {
                 if (busiest < this_rq) {
-                        spin_unlock(&this_rq->lock);
-                        spin_lock(&busiest->lock);
-                        spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+                        raw_spin_unlock(&this_rq->lock);
+                        raw_spin_lock(&busiest->lock);
+                        raw_spin_lock_nested(&this_rq->lock,
+                                             SINGLE_DEPTH_NESTING);
                         ret = 1;
                 } else
-                        spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+                        raw_spin_lock_nested(&busiest->lock,
+                                             SINGLE_DEPTH_NESTING);
         }
         return ret;
 }
@@ -1792,7 +1794,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
         if (unlikely(!irqs_disabled())) {
                 /* printk() doesn't work good under rq->lock */
-                spin_unlock(&this_rq->lock);
+                raw_spin_unlock(&this_rq->lock);
                 BUG_ON(1);
         }

@@ -1802,7 +1804,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
         __releases(busiest->lock)
 {
-        spin_unlock(&busiest->lock);
+        raw_spin_unlock(&busiest->lock);
         lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 #endif
@@ -2025,13 +2027,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
                 return;
         }

-        spin_lock_irqsave(&rq->lock, flags);
+        raw_spin_lock_irqsave(&rq->lock, flags);
         update_rq_clock(rq);
         set_task_cpu(p, cpu);
         p->cpus_allowed = cpumask_of_cpu(cpu);
         p->rt.nr_cpus_allowed = 1;
         p->flags |= PF_THREAD_BOUND;
-        spin_unlock_irqrestore(&rq->lock, flags);
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 EXPORT_SYMBOL(kthread_bind);

@@ -2783,10 +2785,10 @@ static inline void post_schedule(struct rq *rq)
         if (rq->post_schedule) {
                 unsigned long flags;

-                spin_lock_irqsave(&rq->lock, flags);
+                raw_spin_lock_irqsave(&rq->lock, flags);
                 if (rq->curr->sched_class->post_schedule)
                         rq->curr->sched_class->post_schedule(rq);
-                spin_unlock_irqrestore(&rq->lock, flags);
+                raw_spin_unlock_irqrestore(&rq->lock, flags);

                 rq->post_schedule = 0;
         }
@@ -3068,15 +3070,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 {
         BUG_ON(!irqs_disabled());
         if (rq1 == rq2) {
-                spin_lock(&rq1->lock);
+                raw_spin_lock(&rq1->lock);
                 __acquire(rq2->lock); /* Fake it out ;) */
         } else {
                 if (rq1 < rq2) {
-                        spin_lock(&rq1->lock);
-                        spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+                        raw_spin_lock(&rq1->lock);
+                        raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
                 } else {
-                        spin_lock(&rq2->lock);
-                        spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+                        raw_spin_lock(&rq2->lock);
+                        raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                 }
         }
         update_rq_clock(rq1);
@@ -3093,9 +3095,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
         __releases(rq1->lock)
         __releases(rq2->lock)
 {
-        spin_unlock(&rq1->lock);
+        raw_spin_unlock(&rq1->lock);
         if (rq1 != rq2)
-                spin_unlock(&rq2->lock);
+                raw_spin_unlock(&rq2->lock);
         else
                 __release(rq2->lock);
 }
@@ -4188,14 +4190,15 @@ redo:

         if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {

-                spin_lock_irqsave(&busiest->lock, flags);
+                raw_spin_lock_irqsave(&busiest->lock, flags);

                 /* don't kick the migration_thread, if the curr
                  * task on busiest cpu can't be moved to this_cpu
                  */
                 if (!cpumask_test_cpu(this_cpu,
                                       &busiest->curr->cpus_allowed)) {
-                        spin_unlock_irqrestore(&busiest->lock, flags);
+                        raw_spin_unlock_irqrestore(&busiest->lock,
+                                                   flags);
                         all_pinned = 1;
                         goto out_one_pinned;
                 }
@@ -4205,7 +4208,7 @@ redo:
                         busiest->push_cpu = this_cpu;
                         active_balance = 1;
                 }
-                spin_unlock_irqrestore(&busiest->lock, flags);
+                raw_spin_unlock_irqrestore(&busiest->lock, flags);
                 if (active_balance)
                         wake_up_process(busiest->migration_thread);

@@ -4387,10 +4390,10 @@ redo:
                 /*
                  * Should not call ttwu while holding a rq->lock
                  */
-                spin_unlock(&this_rq->lock);
+                raw_spin_unlock(&this_rq->lock);
                 if (active_balance)
                         wake_up_process(busiest->migration_thread);
-                spin_lock(&this_rq->lock);
+                raw_spin_lock(&this_rq->lock);

         } else
                 sd->nr_balance_failed = 0;
@@ -5259,11 +5262,11 @@ void scheduler_tick(void)

         sched_clock_tick();

-        spin_lock(&rq->lock);
+        raw_spin_lock(&rq->lock);
         update_rq_clock(rq);
         update_cpu_load(rq);
         curr->sched_class->task_tick(rq, curr, 0);
-        spin_unlock(&rq->lock);
+        raw_spin_unlock(&rq->lock);

         perf_event_task_tick(curr, cpu);

@@ -5457,7 +5460,7 @@ need_resched_nonpreemptible:
         if (sched_feat(HRTICK))
                 hrtick_clear(rq);

-        spin_lock_irq(&rq->lock);
+        raw_spin_lock_irq(&rq->lock);
         update_rq_clock(rq);
         clear_tsk_need_resched(prev);

@@ -5493,7 +5496,7 @@ need_resched_nonpreemptible:
                 cpu = smp_processor_id();
                 rq = cpu_rq(cpu);
         } else
-                spin_unlock_irq(&rq->lock);
+                raw_spin_unlock_irq(&rq->lock);

         post_schedule(rq);

@@ -6324,7 +6327,7 @@ recheck:
          * make sure no PI-waiters arrive (or leave) while we are
          * changing the priority of the task:
          */
-        spin_lock_irqsave(&p->pi_lock, flags);
+        raw_spin_lock_irqsave(&p->pi_lock, flags);
         /*
          * To be able to change p->policy safely, the apropriate
          * runqueue lock must be held.
@@ -6334,7 +6337,7 @@ recheck:
         if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                 policy = oldpolicy = -1;
                 __task_rq_unlock(rq);
-                spin_unlock_irqrestore(&p->pi_lock, flags);
+                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                 goto recheck;
         }
         update_rq_clock(rq);
@@ -6358,7 +6361,7 @@ recheck:
                 check_class_changed(rq, p, prev_class, oldprio, running);
         }
         __task_rq_unlock(rq);
-        spin_unlock_irqrestore(&p->pi_lock, flags);
+        raw_spin_unlock_irqrestore(&p->pi_lock, flags);

         rt_mutex_adjust_pi(p);

@@ -6684,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield)
          */
         __release(rq->lock);
         spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-        _raw_spin_unlock(&rq->lock);
+        do_raw_spin_unlock(&rq->lock);
         preempt_enable_no_resched();

         schedule();
@@ -6978,7 +6981,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         struct rq *rq = cpu_rq(cpu);
         unsigned long flags;

-        spin_lock_irqsave(&rq->lock, flags);
+        raw_spin_lock_irqsave(&rq->lock, flags);

         __sched_fork(idle);
         idle->se.exec_start = sched_clock();
@@ -6990,7 +6993,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
         idle->oncpu = 1;
 #endif
-        spin_unlock_irqrestore(&rq->lock, flags);
+        raw_spin_unlock_irqrestore(&rq->lock, flags);

         /* Set the preempt count _outside_ the spinlocks! */
 #if defined(CONFIG_PREEMPT)
@@ -7207,10 +7210,10 @@ static int migration_thread(void *data)
                 struct migration_req *req;
                 struct list_head *head;

-                spin_lock_irq(&rq->lock);
+                raw_spin_lock_irq(&rq->lock);

                 if (cpu_is_offline(cpu)) {
-                        spin_unlock_irq(&rq->lock);
+                        raw_spin_unlock_irq(&rq->lock);
                         break;
                 }

@@ -7222,7 +7225,7 @@ static int migration_thread(void *data)
                 head = &rq->migration_queue;

                 if (list_empty(head)) {
-                        spin_unlock_irq(&rq->lock);
+                        raw_spin_unlock_irq(&rq->lock);
                         schedule();
                         set_current_state(TASK_INTERRUPTIBLE);
                         continue;
@@ -7231,14 +7234,14 @@ static int migration_thread(void *data)
                 list_del_init(head->next);

                 if (req->task != NULL) {
-                        spin_unlock(&rq->lock);
+                        raw_spin_unlock(&rq->lock);
                         __migrate_task(req->task, cpu, req->dest_cpu);
                 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
                         req->dest_cpu = RCU_MIGRATION_GOT_QS;
-                        spin_unlock(&rq->lock);
+                        raw_spin_unlock(&rq->lock);
                 } else {
                         req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
-                        spin_unlock(&rq->lock);
+                        raw_spin_unlock(&rq->lock);
                         WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
                 }
                 local_irq_enable();
@@ -7360,14 +7363,14 @@ void sched_idle_next(void)
          * Strictly not necessary since rest of the CPUs are stopped by now
          * and interrupts disabled on the current cpu.
          */
-        spin_lock_irqsave(&rq->lock, flags);
+        raw_spin_lock_irqsave(&rq->lock, flags);

         __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);

         update_rq_clock(rq);
         activate_task(rq, p, 0);

-        spin_unlock_irqrestore(&rq->lock, flags);
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }

 /*
@@ -7403,9 +7406,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
          * that's OK. No task can be added to this CPU, so iteration is
          * fine.
          */
-        spin_unlock_irq(&rq->lock);
+        raw_spin_unlock_irq(&rq->lock);
         move_task_off_dead_cpu(dead_cpu, p);
-        spin_lock_irq(&rq->lock);
+        raw_spin_lock_irq(&rq->lock);

         put_task_struct(p);
 }
@@ -7671,13 +7674,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)

                 /* Update our root-domain */
                 rq = cpu_rq(cpu);
-                spin_lock_irqsave(&rq->lock, flags);
+                raw_spin_lock_irqsave(&rq->lock, flags);
                 if (rq->rd) {
                         BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));

                         set_rq_online(rq);
                 }
-                spin_unlock_irqrestore(&rq->lock, flags);
+                raw_spin_unlock_irqrestore(&rq->lock, flags);
                 break;

 #ifdef CONFIG_HOTPLUG_CPU
@@ -7702,13 +7705,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 put_task_struct(rq->migration_thread);
                 rq->migration_thread = NULL;
                 /* Idle task back to normal (off runqueue, low prio) */
-                spin_lock_irq(&rq->lock);
+                raw_spin_lock_irq(&rq->lock);
                 update_rq_clock(rq);
                 deactivate_task(rq, rq->idle, 0);
                 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
                 rq->idle->sched_class = &idle_sched_class;
                 migrate_dead_tasks(cpu);
-                spin_unlock_irq(&rq->lock);
+                raw_spin_unlock_irq(&rq->lock);
                 cpuset_unlock();
                 migrate_nr_uninterruptible(rq);
                 BUG_ON(rq->nr_running != 0);
@@ -7718,30 +7721,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                  * they didn't take sched_hotcpu_mutex. Just wake up
                  * the requestors.
                  */
-                spin_lock_irq(&rq->lock);
+                raw_spin_lock_irq(&rq->lock);
                 while (!list_empty(&rq->migration_queue)) {
                         struct migration_req *req;

                         req = list_entry(rq->migration_queue.next,
                                          struct migration_req, list);
                         list_del_init(&req->list);
-                        spin_unlock_irq(&rq->lock);
+                        raw_spin_unlock_irq(&rq->lock);
                         complete(&req->done);
-                        spin_lock_irq(&rq->lock);
+                        raw_spin_lock_irq(&rq->lock);
                 }
-                spin_unlock_irq(&rq->lock);
+                raw_spin_unlock_irq(&rq->lock);
                 break;

         case CPU_DYING:
         case CPU_DYING_FROZEN:
                 /* Update our root-domain */
                 rq = cpu_rq(cpu);
-                spin_lock_irqsave(&rq->lock, flags);
+                raw_spin_lock_irqsave(&rq->lock, flags);
                 if (rq->rd) {
                         BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                         set_rq_offline(rq);
                 }
-                spin_unlock_irqrestore(&rq->lock, flags);
+                raw_spin_unlock_irqrestore(&rq->lock, flags);
                 break;
 #endif
         }
@@ -7965,7 +7968,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
         struct root_domain *old_rd = NULL;
         unsigned long flags;

-        spin_lock_irqsave(&rq->lock, flags);
+        raw_spin_lock_irqsave(&rq->lock, flags);

         if (rq->rd) {
                 old_rd = rq->rd;
@@ -7991,7 +7994,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
         if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
                 set_rq_online(rq);

-        spin_unlock_irqrestore(&rq->lock, flags);
+        raw_spin_unlock_irqrestore(&rq->lock, flags);

         if (old_rd)
                 free_rootdomain(old_rd);
@@ -8277,14 +8280,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);

 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
                  struct sched_group **sg, struct cpumask *unused)
 {
         if (sg)
-                *sg = &per_cpu(sched_group_cpus, cpu).sg;
+                *sg = &per_cpu(sched_groups, cpu).sg;
         return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
@@ -9347,13 +9350,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
         rt_rq->rt_nr_migratory = 0;
         rt_rq->overloaded = 0;
-        plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+        plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
 #endif

         rt_rq->rt_time = 0;
         rt_rq->rt_throttled = 0;
         rt_rq->rt_runtime = 0;
-        spin_lock_init(&rt_rq->rt_runtime_lock);
+        raw_spin_lock_init(&rt_rq->rt_runtime_lock);

 #ifdef CONFIG_RT_GROUP_SCHED
         rt_rq->rt_nr_boosted = 0;
@@ -9513,7 +9516,7 @@ void __init sched_init(void)
                 struct rq *rq;

                 rq = cpu_rq(i);
-                spin_lock_init(&rq->lock);
+                raw_spin_lock_init(&rq->lock);
                 rq->nr_running = 0;
                 rq->calc_load_active = 0;
                 rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9573,7 +9576,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
                 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
                 init_tg_rt_entry(&init_task_group,
-                                 &per_cpu(init_rt_rq, i),
+                                 &per_cpu(init_rt_rq_var, i),
                                  &per_cpu(init_sched_rt_entity, i), i, 1,
                                  root_task_group.rt_se[i]);
 #endif
@@ -9611,7 +9614,7 @@ void __init sched_init(void)
 #endif

 #ifdef CONFIG_RT_MUTEXES
-        plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+        plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
 #endif

         /*
@@ -9734,13 +9737,13 @@ void normalize_rt_tasks(void)
                         continue;
                 }

-                spin_lock(&p->pi_lock);
+                raw_spin_lock(&p->pi_lock);
                 rq = __task_rq_lock(p);

                 normalize_task(rq, p);

                 __task_rq_unlock(rq);
-                spin_unlock(&p->pi_lock);
+                raw_spin_unlock(&p->pi_lock);
         } while_each_thread(g, p);

         read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10103,9 +10106,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
         struct rq *rq = cfs_rq->rq;
         unsigned long flags;

-        spin_lock_irqsave(&rq->lock, flags);
+        raw_spin_lock_irqsave(&rq->lock, flags);
         __set_se_shares(se, shares);
-        spin_unlock_irqrestore(&rq->lock, flags);
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }

 static DEFINE_MUTEX(shares_mutex);
@@ -10290,18 +10293,18 @@ static int tg_set_bandwidth(struct task_group *tg,
         if (err)
                 goto unlock;

-        spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+        raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
         tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
         tg->rt_bandwidth.rt_runtime = rt_runtime;

         for_each_possible_cpu(i) {
                 struct rt_rq *rt_rq = tg->rt_rq[i];

-                spin_lock(&rt_rq->rt_runtime_lock);
+                raw_spin_lock(&rt_rq->rt_runtime_lock);
                 rt_rq->rt_runtime = rt_runtime;
-                spin_unlock(&rt_rq->rt_runtime_lock);
+                raw_spin_unlock(&rt_rq->rt_runtime_lock);
         }
-        spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+        raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
 unlock:
         read_unlock(&tasklist_lock);
         mutex_unlock(&rt_constraints_mutex);
@@ -10406,15 +10409,15 @@ static int sched_rt_global_constraints(void)
         if (sysctl_sched_rt_runtime == 0)
                 return -EBUSY;

-        spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+        raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
         for_each_possible_cpu(i) {
                 struct rt_rq *rt_rq = &cpu_rq(i)->rt;

-                spin_lock(&rt_rq->rt_runtime_lock);
+                raw_spin_lock(&rt_rq->rt_runtime_lock);
                 rt_rq->rt_runtime = global_rt_runtime();
-                spin_unlock(&rt_rq->rt_runtime_lock);
+                raw_spin_unlock(&rt_rq->rt_runtime_lock);
         }
-        spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+        raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

         return 0;
 }
@@ -10705,9 +10708,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
         /*
          * Take rq->lock to make 64-bit read safe on 32-bit platforms.
          */
-        spin_lock_irq(&cpu_rq(cpu)->lock);
+        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
         data = *cpuusage;
-        spin_unlock_irq(&cpu_rq(cpu)->lock);
+        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
         data = *cpuusage;
 #endif
@@ -10723,9 +10726,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
         /*
          * Take rq->lock to make 64-bit write safe on 32-bit platforms.
          */
-        spin_lock_irq(&cpu_rq(cpu)->lock);
+        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
         *cpuusage = val;
-        spin_unlock_irq(&cpu_rq(cpu)->lock);
+        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
         *cpuusage = val;
 #endif
@@ -10959,9 +10962,9 @@ void synchronize_sched_expedited(void)
                 init_completion(&req->done);
                 req->task = NULL;
                 req->dest_cpu = RCU_MIGRATION_NEED_QS;
-                spin_lock_irqsave(&rq->lock, flags);
+                raw_spin_lock_irqsave(&rq->lock, flags);
                 list_add(&req->list, &rq->migration_queue);
-                spin_unlock_irqrestore(&rq->lock, flags);
+                raw_spin_unlock_irqrestore(&rq->lock, flags);
                 wake_up_process(rq->migration_thread);
         }
         for_each_online_cpu(cpu) {
@@ -10969,11 +10972,11 @@ void synchronize_sched_expedited(void)
                 req = &per_cpu(rcu_migration_req, cpu);
                 rq = cpu_rq(cpu);
                 wait_for_completion(&req->done);
-                spin_lock_irqsave(&rq->lock, flags);
+                raw_spin_lock_irqsave(&rq->lock, flags);
                 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
                         need_full_sync = 1;
                 req->dest_cpu = RCU_MIGRATION_IDLE;
-                spin_unlock_irqrestore(&rq->lock, flags);
+                raw_spin_unlock_irqrestore(&rq->lock, flags);
         }
         rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
         synchronize_sched_expedited_count++;