path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  531
1 files changed, 279 insertions, 252 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index fd05861b2111..4508fe7048be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
141 141
142struct rt_bandwidth { 142struct rt_bandwidth {
143 /* nests inside the rq lock: */ 143 /* nests inside the rq lock: */
144 spinlock_t rt_runtime_lock; 144 raw_spinlock_t rt_runtime_lock;
145 ktime_t rt_period; 145 ktime_t rt_period;
146 u64 rt_runtime; 146 u64 rt_runtime;
147 struct hrtimer rt_period_timer; 147 struct hrtimer rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
178 rt_b->rt_period = ns_to_ktime(period); 178 rt_b->rt_period = ns_to_ktime(period);
179 rt_b->rt_runtime = runtime; 179 rt_b->rt_runtime = runtime;
180 180
181 spin_lock_init(&rt_b->rt_runtime_lock); 181 raw_spin_lock_init(&rt_b->rt_runtime_lock);
182 182
183 hrtimer_init(&rt_b->rt_period_timer, 183 hrtimer_init(&rt_b->rt_period_timer,
184 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 184 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
200 if (hrtimer_active(&rt_b->rt_period_timer)) 200 if (hrtimer_active(&rt_b->rt_period_timer))
201 return; 201 return;
202 202
203 spin_lock(&rt_b->rt_runtime_lock); 203 raw_spin_lock(&rt_b->rt_runtime_lock);
204 for (;;) { 204 for (;;) {
205 unsigned long delta; 205 unsigned long delta;
206 ktime_t soft, hard; 206 ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
217 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, 217 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
218 HRTIMER_MODE_ABS_PINNED, 0); 218 HRTIMER_MODE_ABS_PINNED, 0);
219 } 219 }
220 spin_unlock(&rt_b->rt_runtime_lock); 220 raw_spin_unlock(&rt_b->rt_runtime_lock);
221} 221}
222 222
223#ifdef CONFIG_RT_GROUP_SCHED 223#ifdef CONFIG_RT_GROUP_SCHED
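The hunks above set the pattern for the whole patch: locks that must keep spinning even in configurations where ordinary spinlocks can become sleeping locks are switched from spinlock_t to raw_spinlock_t, and every call site is renamed one-for-one (spin_lock_init becomes raw_spin_lock_init, spin_lock becomes raw_spin_lock, and so on), with unchanged behaviour on mainline. A minimal sketch of the converted usage, with illustrative names that are not part of the patch:

    static raw_spinlock_t example_lock;     /* hypothetical lock, not in the patch */

    static void example_init(void)
    {
            raw_spin_lock_init(&example_lock);
    }

    static void example_update(void)
    {
            raw_spin_lock(&example_lock);
            /* short, non-sleeping critical section */
            raw_spin_unlock(&example_lock);
    }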
@@ -470,7 +470,7 @@ struct rt_rq {
470 u64 rt_time; 470 u64 rt_time;
471 u64 rt_runtime; 471 u64 rt_runtime;
472 /* Nests inside the rq lock: */ 472 /* Nests inside the rq lock: */
473 spinlock_t rt_runtime_lock; 473 raw_spinlock_t rt_runtime_lock;
474 474
475#ifdef CONFIG_RT_GROUP_SCHED 475#ifdef CONFIG_RT_GROUP_SCHED
476 unsigned long rt_nr_boosted; 476 unsigned long rt_nr_boosted;
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain;
525 */ 525 */
526struct rq { 526struct rq {
527 /* runqueue lock: */ 527 /* runqueue lock: */
528 spinlock_t lock; 528 raw_spinlock_t lock;
529 529
530 /* 530 /*
531 * nr_running and cpu_load should be in the same cacheline because 531 * nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
685 */ 685 */
686int runqueue_is_locked(int cpu) 686int runqueue_is_locked(int cpu)
687{ 687{
688 return spin_is_locked(&cpu_rq(cpu)->lock); 688 return raw_spin_is_locked(&cpu_rq(cpu)->lock);
689} 689}
690 690
691/* 691/*
@@ -893,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
893 */ 893 */
894 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 894 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
895 895
896 spin_unlock_irq(&rq->lock); 896 raw_spin_unlock_irq(&rq->lock);
897} 897}
898 898
899#else /* __ARCH_WANT_UNLOCKED_CTXSW */ 899#else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -917,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
917 next->oncpu = 1; 917 next->oncpu = 1;
918#endif 918#endif
919#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 919#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
920 spin_unlock_irq(&rq->lock); 920 raw_spin_unlock_irq(&rq->lock);
921#else 921#else
922 spin_unlock(&rq->lock); 922 raw_spin_unlock(&rq->lock);
923#endif 923#endif
924} 924}
925 925
@@ -949,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
949{ 949{
950 for (;;) { 950 for (;;) {
951 struct rq *rq = task_rq(p); 951 struct rq *rq = task_rq(p);
952 spin_lock(&rq->lock); 952 raw_spin_lock(&rq->lock);
953 if (likely(rq == task_rq(p))) 953 if (likely(rq == task_rq(p)))
954 return rq; 954 return rq;
955 spin_unlock(&rq->lock); 955 raw_spin_unlock(&rq->lock);
956 } 956 }
957} 957}
958 958
@@ -969,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
969 for (;;) { 969 for (;;) {
970 local_irq_save(*flags); 970 local_irq_save(*flags);
971 rq = task_rq(p); 971 rq = task_rq(p);
972 spin_lock(&rq->lock); 972 raw_spin_lock(&rq->lock);
973 if (likely(rq == task_rq(p))) 973 if (likely(rq == task_rq(p)))
974 return rq; 974 return rq;
975 spin_unlock_irqrestore(&rq->lock, *flags); 975 raw_spin_unlock_irqrestore(&rq->lock, *flags);
976 } 976 }
977} 977}
978 978
@@ -981,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
981 struct rq *rq = task_rq(p); 981 struct rq *rq = task_rq(p);
982 982
983 smp_mb(); /* spin-unlock-wait is not a full memory barrier */ 983 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
984 spin_unlock_wait(&rq->lock); 984 raw_spin_unlock_wait(&rq->lock);
985} 985}
986 986
987static void __task_rq_unlock(struct rq *rq) 987static void __task_rq_unlock(struct rq *rq)
988 __releases(rq->lock) 988 __releases(rq->lock)
989{ 989{
990 spin_unlock(&rq->lock); 990 raw_spin_unlock(&rq->lock);
991} 991}
992 992
993static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) 993static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
994 __releases(rq->lock) 994 __releases(rq->lock)
995{ 995{
996 spin_unlock_irqrestore(&rq->lock, *flags); 996 raw_spin_unlock_irqrestore(&rq->lock, *flags);
997} 997}
998 998
999/* 999/*
@@ -1006,7 +1006,7 @@ static struct rq *this_rq_lock(void)
1006 1006
1007 local_irq_disable(); 1007 local_irq_disable();
1008 rq = this_rq(); 1008 rq = this_rq();
1009 spin_lock(&rq->lock); 1009 raw_spin_lock(&rq->lock);
1010 1010
1011 return rq; 1011 return rq;
1012} 1012}
@@ -1053,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
1053 1053
1054 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 1054 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1055 1055
1056 spin_lock(&rq->lock); 1056 raw_spin_lock(&rq->lock);
1057 update_rq_clock(rq); 1057 update_rq_clock(rq);
1058 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 1058 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1059 spin_unlock(&rq->lock); 1059 raw_spin_unlock(&rq->lock);
1060 1060
1061 return HRTIMER_NORESTART; 1061 return HRTIMER_NORESTART;
1062} 1062}
@@ -1069,10 +1069,10 @@ static void __hrtick_start(void *arg)
1069{ 1069{
1070 struct rq *rq = arg; 1070 struct rq *rq = arg;
1071 1071
1072 spin_lock(&rq->lock); 1072 raw_spin_lock(&rq->lock);
1073 hrtimer_restart(&rq->hrtick_timer); 1073 hrtimer_restart(&rq->hrtick_timer);
1074 rq->hrtick_csd_pending = 0; 1074 rq->hrtick_csd_pending = 0;
1075 spin_unlock(&rq->lock); 1075 raw_spin_unlock(&rq->lock);
1076} 1076}
1077 1077
1078/* 1078/*
@@ -1179,7 +1179,7 @@ static void resched_task(struct task_struct *p)
1179{ 1179{
1180 int cpu; 1180 int cpu;
1181 1181
1182 assert_spin_locked(&task_rq(p)->lock); 1182 assert_raw_spin_locked(&task_rq(p)->lock);
1183 1183
1184 if (test_tsk_need_resched(p)) 1184 if (test_tsk_need_resched(p))
1185 return; 1185 return;
@@ -1201,10 +1201,10 @@ static void resched_cpu(int cpu)
1201 struct rq *rq = cpu_rq(cpu); 1201 struct rq *rq = cpu_rq(cpu);
1202 unsigned long flags; 1202 unsigned long flags;
1203 1203
1204 if (!spin_trylock_irqsave(&rq->lock, flags)) 1204 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
1205 return; 1205 return;
1206 resched_task(cpu_curr(cpu)); 1206 resched_task(cpu_curr(cpu));
1207 spin_unlock_irqrestore(&rq->lock, flags); 1207 raw_spin_unlock_irqrestore(&rq->lock, flags);
1208} 1208}
1209 1209
1210#ifdef CONFIG_NO_HZ 1210#ifdef CONFIG_NO_HZ
@@ -1273,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1273#else /* !CONFIG_SMP */ 1273#else /* !CONFIG_SMP */
1274static void resched_task(struct task_struct *p) 1274static void resched_task(struct task_struct *p)
1275{ 1275{
1276 assert_spin_locked(&task_rq(p)->lock); 1276 assert_raw_spin_locked(&task_rq(p)->lock);
1277 set_tsk_need_resched(p); 1277 set_tsk_need_resched(p);
1278} 1278}
1279 1279
@@ -1600,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
1600 struct rq *rq = cpu_rq(cpu); 1600 struct rq *rq = cpu_rq(cpu);
1601 unsigned long flags; 1601 unsigned long flags;
1602 1602
1603 spin_lock_irqsave(&rq->lock, flags); 1603 raw_spin_lock_irqsave(&rq->lock, flags);
1604 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; 1604 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
1605 tg->cfs_rq[cpu]->shares = boost ? 0 : shares; 1605 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
1606 __set_se_shares(tg->se[cpu], shares); 1606 __set_se_shares(tg->se[cpu], shares);
1607 spin_unlock_irqrestore(&rq->lock, flags); 1607 raw_spin_unlock_irqrestore(&rq->lock, flags);
1608 } 1608 }
1609} 1609}
1610 1610
@@ -1706,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1706 if (root_task_group_empty()) 1706 if (root_task_group_empty())
1707 return; 1707 return;
1708 1708
1709 spin_unlock(&rq->lock); 1709 raw_spin_unlock(&rq->lock);
1710 update_shares(sd); 1710 update_shares(sd);
1711 spin_lock(&rq->lock); 1711 raw_spin_lock(&rq->lock);
1712} 1712}
1713 1713
1714static void update_h_load(long cpu) 1714static void update_h_load(long cpu)
@@ -1748,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1748 __acquires(busiest->lock) 1748 __acquires(busiest->lock)
1749 __acquires(this_rq->lock) 1749 __acquires(this_rq->lock)
1750{ 1750{
1751 spin_unlock(&this_rq->lock); 1751 raw_spin_unlock(&this_rq->lock);
1752 double_rq_lock(this_rq, busiest); 1752 double_rq_lock(this_rq, busiest);
1753 1753
1754 return 1; 1754 return 1;
@@ -1769,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1769{ 1769{
1770 int ret = 0; 1770 int ret = 0;
1771 1771
1772 if (unlikely(!spin_trylock(&busiest->lock))) { 1772 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1773 if (busiest < this_rq) { 1773 if (busiest < this_rq) {
1774 spin_unlock(&this_rq->lock); 1774 raw_spin_unlock(&this_rq->lock);
1775 spin_lock(&busiest->lock); 1775 raw_spin_lock(&busiest->lock);
1776 spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); 1776 raw_spin_lock_nested(&this_rq->lock,
1777 SINGLE_DEPTH_NESTING);
1777 ret = 1; 1778 ret = 1;
1778 } else 1779 } else
1779 spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); 1780 raw_spin_lock_nested(&busiest->lock,
1781 SINGLE_DEPTH_NESTING);
1780 } 1782 }
1781 return ret; 1783 return ret;
1782} 1784}
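The retry path above encodes the runqueue lock-ordering rule: when two rq locks are needed and the trylock fails, the lower-addressed lock is taken first and the other is taken nested, so two CPUs balancing against each other cannot deadlock (double_rq_lock further down applies the same rule). A userspace analogue of that ordering idea, written with pthreads rather than the kernel primitives:

    #include <pthread.h>

    /*
     * Always lock the lower-addressed mutex first; this mirrors the ordering
     * used by _double_lock_balance()/double_rq_lock(), it is not the kernel code.
     */
    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (a == b) {
                    pthread_mutex_lock(a);
                    return;
            }
            if (a < b) {
                    pthread_mutex_lock(a);
                    pthread_mutex_lock(b);
            } else {
                    pthread_mutex_lock(b);
                    pthread_mutex_lock(a);
            }
    }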
@@ -1790,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1790{ 1792{
1791 if (unlikely(!irqs_disabled())) { 1793 if (unlikely(!irqs_disabled())) {
1792 /* printk() doesn't work good under rq->lock */ 1794 /* printk() doesn't work good under rq->lock */
1793 spin_unlock(&this_rq->lock); 1795 raw_spin_unlock(&this_rq->lock);
1794 BUG_ON(1); 1796 BUG_ON(1);
1795 } 1797 }
1796 1798
@@ -1800,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1800static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1802static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1801 __releases(busiest->lock) 1803 __releases(busiest->lock)
1802{ 1804{
1803 spin_unlock(&busiest->lock); 1805 raw_spin_unlock(&busiest->lock);
1804 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 1806 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1805} 1807}
1806#endif 1808#endif
@@ -2000,39 +2002,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2000 p->sched_class->prio_changed(rq, p, oldprio, running); 2002 p->sched_class->prio_changed(rq, p, oldprio, running);
2001} 2003}
2002 2004
2003/**
2004 * kthread_bind - bind a just-created kthread to a cpu.
2005 * @p: thread created by kthread_create().
2006 * @cpu: cpu (might not be online, must be possible) for @k to run on.
2007 *
2008 * Description: This function is equivalent to set_cpus_allowed(),
2009 * except that @cpu doesn't need to be online, and the thread must be
2010 * stopped (i.e., just returned from kthread_create()).
2011 *
2012 * Function lives here instead of kthread.c because it messes with
2013 * scheduler internals which require locking.
2014 */
2015void kthread_bind(struct task_struct *p, unsigned int cpu)
2016{
2017 struct rq *rq = cpu_rq(cpu);
2018 unsigned long flags;
2019
2020 /* Must have done schedule() in kthread() before we set_task_cpu */
2021 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
2022 WARN_ON(1);
2023 return;
2024 }
2025
2026 spin_lock_irqsave(&rq->lock, flags);
2027 update_rq_clock(rq);
2028 set_task_cpu(p, cpu);
2029 p->cpus_allowed = cpumask_of_cpu(cpu);
2030 p->rt.nr_cpus_allowed = 1;
2031 p->flags |= PF_THREAD_BOUND;
2032 spin_unlock_irqrestore(&rq->lock, flags);
2033}
2034EXPORT_SYMBOL(kthread_bind);
2035
2036#ifdef CONFIG_SMP 2005#ifdef CONFIG_SMP
2037/* 2006/*
2038 * Is this task likely cache-hot: 2007 * Is this task likely cache-hot:
@@ -2042,6 +2011,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2042{ 2011{
2043 s64 delta; 2012 s64 delta;
2044 2013
2014 if (p->sched_class != &fair_sched_class)
2015 return 0;
2016
2045 /* 2017 /*
2046 * Buddy candidates are cache hot: 2018 * Buddy candidates are cache hot:
2047 */ 2019 */
@@ -2050,9 +2022,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2050 &p->se == cfs_rq_of(&p->se)->last)) 2022 &p->se == cfs_rq_of(&p->se)->last))
2051 return 1; 2023 return 1;
2052 2024
2053 if (p->sched_class != &fair_sched_class)
2054 return 0;
2055
2056 if (sysctl_sched_migration_cost == -1) 2025 if (sysctl_sched_migration_cost == -1)
2057 return 1; 2026 return 1;
2058 if (sysctl_sched_migration_cost == 0) 2027 if (sysctl_sched_migration_cost == 0)
@@ -2063,22 +2032,23 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2063 return delta < (s64)sysctl_sched_migration_cost; 2032 return delta < (s64)sysctl_sched_migration_cost;
2064} 2033}
2065 2034
2066
2067void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 2035void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2068{ 2036{
2069 int old_cpu = task_cpu(p); 2037#ifdef CONFIG_SCHED_DEBUG
2070 struct cfs_rq *old_cfsrq = task_cfs_rq(p), 2038 /*
2071 *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); 2039 * We should never call set_task_cpu() on a blocked task,
2040 * ttwu() will sort out the placement.
2041 */
2042 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2043 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2044#endif
2072 2045
2073 trace_sched_migrate_task(p, new_cpu); 2046 trace_sched_migrate_task(p, new_cpu);
2074 2047
2075 if (old_cpu != new_cpu) { 2048 if (task_cpu(p) != new_cpu) {
2076 p->se.nr_migrations++; 2049 p->se.nr_migrations++;
2077 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 2050 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2078 1, 1, NULL, 0);
2079 } 2051 }
2080 p->se.vruntime -= old_cfsrq->min_vruntime -
2081 new_cfsrq->min_vruntime;
2082 2052
2083 __set_task_cpu(p, new_cpu); 2053 __set_task_cpu(p, new_cpu);
2084} 2054}
@@ -2103,13 +2073,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2103 2073
2104 /* 2074 /*
2105 * If the task is not on a runqueue (and not running), then 2075 * If the task is not on a runqueue (and not running), then
2106 * it is sufficient to simply update the task's cpu field. 2076 * the next wake-up will properly place the task.
2107 */ 2077 */
2108 if (!p->se.on_rq && !task_running(rq, p)) { 2078 if (!p->se.on_rq && !task_running(rq, p))
2109 update_rq_clock(rq);
2110 set_task_cpu(p, dest_cpu);
2111 return 0; 2079 return 0;
2112 }
2113 2080
2114 init_completion(&req->done); 2081 init_completion(&req->done);
2115 req->task = p; 2082 req->task = p;
@@ -2315,10 +2282,73 @@ void task_oncpu_function_call(struct task_struct *p,
2315} 2282}
2316 2283
2317#ifdef CONFIG_SMP 2284#ifdef CONFIG_SMP
2285static int select_fallback_rq(int cpu, struct task_struct *p)
2286{
2287 int dest_cpu;
2288 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2289
2290 /* Look for allowed, online CPU in same node. */
2291 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2292 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2293 return dest_cpu;
2294
2295 /* Any allowed, online CPU? */
2296 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2297 if (dest_cpu < nr_cpu_ids)
2298 return dest_cpu;
2299
2300 /* No more Mr. Nice Guy. */
2301 if (dest_cpu >= nr_cpu_ids) {
2302 rcu_read_lock();
2303 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
2304 rcu_read_unlock();
2305 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
2306
2307 /*
2308 * Don't tell them about moving exiting tasks or
2309 * kernel threads (both mm NULL), since they never
2310 * leave kernel.
2311 */
2312 if (p->mm && printk_ratelimit()) {
2313 printk(KERN_INFO "process %d (%s) no "
2314 "longer affine to cpu%d\n",
2315 task_pid_nr(p), p->comm, cpu);
2316 }
2317 }
2318
2319 return dest_cpu;
2320}
2321
2322/*
2323 * Called from:
2324 *
2325 * - fork, @p is stable because it isn't on the tasklist yet
2326 *
2327 * - exec, @p is unstable, retry loop
2328 *
2329 * - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
2330 * we should be good.
2331 */
2318static inline 2332static inline
2319int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) 2333int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2320{ 2334{
2321 return p->sched_class->select_task_rq(p, sd_flags, wake_flags); 2335 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
2336
2337 /*
2338 * In order not to call set_task_cpu() on a blocking task we need
2339 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2340 * cpu.
2341 *
2342 * Since this is common to all placement strategies, this lives here.
2343 *
2344 * [ this allows ->select_task() to simply return task_cpu(p) and
2345 * not worry about this generic constraint ]
2346 */
2347 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
2348 !cpu_online(cpu)))
2349 cpu = select_fallback_rq(task_cpu(p), p);
2350
2351 return cpu;
2322} 2352}
2323#endif 2353#endif
2324 2354
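With this hunk, select_task_rq() owns the generic placement constraint: whatever CPU the scheduling class returns is checked against ->cpus_allowed and cpu_online(), and select_fallback_rq() picks a replacement (same node first, then any allowed online CPU, then a cpuset reset) when the choice is unusable. A condensed sketch of the caller side, mirroring the wake-up path later in this patch; this is a fragment, not a standalone function:

    /* condensed from try_to_wake_up() below */
    p->state = TASK_WAKING;
    if (p->sched_class->task_waking)
            p->sched_class->task_waking(rq, p);
    __task_rq_unlock(rq);

    cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
    /* cpu is now allowed and online, or a fallback chosen for us */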
@@ -2373,6 +2403,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2373 if (task_contributes_to_load(p)) 2403 if (task_contributes_to_load(p))
2374 rq->nr_uninterruptible--; 2404 rq->nr_uninterruptible--;
2375 p->state = TASK_WAKING; 2405 p->state = TASK_WAKING;
2406
2407 if (p->sched_class->task_waking)
2408 p->sched_class->task_waking(rq, p);
2409
2376 __task_rq_unlock(rq); 2410 __task_rq_unlock(rq);
2377 2411
2378 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); 2412 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
@@ -2436,8 +2470,8 @@ out_running:
2436 2470
2437 p->state = TASK_RUNNING; 2471 p->state = TASK_RUNNING;
2438#ifdef CONFIG_SMP 2472#ifdef CONFIG_SMP
2439 if (p->sched_class->task_wake_up) 2473 if (p->sched_class->task_woken)
2440 p->sched_class->task_wake_up(rq, p); 2474 p->sched_class->task_woken(rq, p);
2441 2475
2442 if (unlikely(rq->idle_stamp)) { 2476 if (unlikely(rq->idle_stamp)) {
2443 u64 delta = rq->clock - rq->idle_stamp; 2477 u64 delta = rq->clock - rq->idle_stamp;
@@ -2536,14 +2570,6 @@ static void __sched_fork(struct task_struct *p)
2536#ifdef CONFIG_PREEMPT_NOTIFIERS 2570#ifdef CONFIG_PREEMPT_NOTIFIERS
2537 INIT_HLIST_HEAD(&p->preempt_notifiers); 2571 INIT_HLIST_HEAD(&p->preempt_notifiers);
2538#endif 2572#endif
2539
2540 /*
2541 * We mark the process as running here, but have not actually
2542 * inserted it onto the runqueue yet. This guarantees that
2543 * nobody will actually run it, and a signal or other external
2544 * event cannot wake it up and insert it on the runqueue either.
2545 */
2546 p->state = TASK_RUNNING;
2547} 2573}
2548 2574
2549/* 2575/*
@@ -2554,6 +2580,12 @@ void sched_fork(struct task_struct *p, int clone_flags)
2554 int cpu = get_cpu(); 2580 int cpu = get_cpu();
2555 2581
2556 __sched_fork(p); 2582 __sched_fork(p);
2583 /*
2584 * We mark the process as waking here. This guarantees that
2585 * nobody will actually run it, and a signal or other external
2586 * event cannot wake it up and insert it on the runqueue either.
2587 */
2588 p->state = TASK_WAKING;
2557 2589
2558 /* 2590 /*
2559 * Revert to default priority/policy on fork if requested. 2591 * Revert to default priority/policy on fork if requested.
@@ -2622,14 +2654,15 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2622 struct rq *rq; 2654 struct rq *rq;
2623 2655
2624 rq = task_rq_lock(p, &flags); 2656 rq = task_rq_lock(p, &flags);
2625 BUG_ON(p->state != TASK_RUNNING); 2657 BUG_ON(p->state != TASK_WAKING);
2658 p->state = TASK_RUNNING;
2626 update_rq_clock(rq); 2659 update_rq_clock(rq);
2627 activate_task(rq, p, 0); 2660 activate_task(rq, p, 0);
2628 trace_sched_wakeup_new(rq, p, 1); 2661 trace_sched_wakeup_new(rq, p, 1);
2629 check_preempt_curr(rq, p, WF_FORK); 2662 check_preempt_curr(rq, p, WF_FORK);
2630#ifdef CONFIG_SMP 2663#ifdef CONFIG_SMP
2631 if (p->sched_class->task_wake_up) 2664 if (p->sched_class->task_woken)
2632 p->sched_class->task_wake_up(rq, p); 2665 p->sched_class->task_woken(rq, p);
2633#endif 2666#endif
2634 task_rq_unlock(rq, &flags); 2667 task_rq_unlock(rq, &flags);
2635} 2668}
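Taken together, the last three hunks rework the fork-time handshake: __sched_fork() no longer forces TASK_RUNNING, sched_fork() parks the child in TASK_WAKING, and wake_up_new_task() asserts that state before flipping it to TASK_RUNNING and activating the task, so nothing can wake or enqueue the half-constructed child in between. A condensed sketch of the resulting ordering, comments only, no new API:

    /* fork path, condensed from the hunks above */
    sched_fork(p, clone_flags);        /* __sched_fork(p); p->state = TASK_WAKING */
    /* ... remainder of process setup, child still not runnable ... */
    wake_up_new_task(p, clone_flags);  /* BUG_ON(p->state != TASK_WAKING);
                                        * p->state = TASK_RUNNING;
                                        * activate_task(rq, p, 0); */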
@@ -2781,10 +2814,10 @@ static inline void post_schedule(struct rq *rq)
2781 if (rq->post_schedule) { 2814 if (rq->post_schedule) {
2782 unsigned long flags; 2815 unsigned long flags;
2783 2816
2784 spin_lock_irqsave(&rq->lock, flags); 2817 raw_spin_lock_irqsave(&rq->lock, flags);
2785 if (rq->curr->sched_class->post_schedule) 2818 if (rq->curr->sched_class->post_schedule)
2786 rq->curr->sched_class->post_schedule(rq); 2819 rq->curr->sched_class->post_schedule(rq);
2787 spin_unlock_irqrestore(&rq->lock, flags); 2820 raw_spin_unlock_irqrestore(&rq->lock, flags);
2788 2821
2789 rq->post_schedule = 0; 2822 rq->post_schedule = 0;
2790 } 2823 }
@@ -3066,15 +3099,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
3066{ 3099{
3067 BUG_ON(!irqs_disabled()); 3100 BUG_ON(!irqs_disabled());
3068 if (rq1 == rq2) { 3101 if (rq1 == rq2) {
3069 spin_lock(&rq1->lock); 3102 raw_spin_lock(&rq1->lock);
3070 __acquire(rq2->lock); /* Fake it out ;) */ 3103 __acquire(rq2->lock); /* Fake it out ;) */
3071 } else { 3104 } else {
3072 if (rq1 < rq2) { 3105 if (rq1 < rq2) {
3073 spin_lock(&rq1->lock); 3106 raw_spin_lock(&rq1->lock);
3074 spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 3107 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
3075 } else { 3108 } else {
3076 spin_lock(&rq2->lock); 3109 raw_spin_lock(&rq2->lock);
3077 spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 3110 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
3078 } 3111 }
3079 } 3112 }
3080 update_rq_clock(rq1); 3113 update_rq_clock(rq1);
@@ -3091,29 +3124,44 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
3091 __releases(rq1->lock) 3124 __releases(rq1->lock)
3092 __releases(rq2->lock) 3125 __releases(rq2->lock)
3093{ 3126{
3094 spin_unlock(&rq1->lock); 3127 raw_spin_unlock(&rq1->lock);
3095 if (rq1 != rq2) 3128 if (rq1 != rq2)
3096 spin_unlock(&rq2->lock); 3129 raw_spin_unlock(&rq2->lock);
3097 else 3130 else
3098 __release(rq2->lock); 3131 __release(rq2->lock);
3099} 3132}
3100 3133
3101/* 3134/*
3102 * If dest_cpu is allowed for this process, migrate the task to it. 3135 * sched_exec - execve() is a valuable balancing opportunity, because at
3103 * This is accomplished by forcing the cpu_allowed mask to only 3136 * this point the task has the smallest effective memory and cache footprint.
3104 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
3105 * the cpu_allowed mask is restored.
3106 */ 3137 */
3107static void sched_migrate_task(struct task_struct *p, int dest_cpu) 3138void sched_exec(void)
3108{ 3139{
3140 struct task_struct *p = current;
3109 struct migration_req req; 3141 struct migration_req req;
3142 int dest_cpu, this_cpu;
3110 unsigned long flags; 3143 unsigned long flags;
3111 struct rq *rq; 3144 struct rq *rq;
3112 3145
3146again:
3147 this_cpu = get_cpu();
3148 dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
3149 if (dest_cpu == this_cpu) {
3150 put_cpu();
3151 return;
3152 }
3153
3113 rq = task_rq_lock(p, &flags); 3154 rq = task_rq_lock(p, &flags);
3155 put_cpu();
3156
3157 /*
3158 * select_task_rq() can race against ->cpus_allowed
3159 */
3114 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) 3160 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
3115 || unlikely(!cpu_active(dest_cpu))) 3161 || unlikely(!cpu_active(dest_cpu))) {
3116 goto out; 3162 task_rq_unlock(rq, &flags);
3163 goto again;
3164 }
3117 3165
3118 /* force the process onto the specified CPU */ 3166 /* force the process onto the specified CPU */
3119 if (migrate_task(p, dest_cpu, &req)) { 3167 if (migrate_task(p, dest_cpu, &req)) {
@@ -3128,24 +3176,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
3128 3176
3129 return; 3177 return;
3130 } 3178 }
3131out:
3132 task_rq_unlock(rq, &flags); 3179 task_rq_unlock(rq, &flags);
3133} 3180}
3134 3181
3135/* 3182/*
3136 * sched_exec - execve() is a valuable balancing opportunity, because at
3137 * this point the task has the smallest effective memory and cache footprint.
3138 */
3139void sched_exec(void)
3140{
3141 int new_cpu, this_cpu = get_cpu();
3142 new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
3143 put_cpu();
3144 if (new_cpu != this_cpu)
3145 sched_migrate_task(current, new_cpu);
3146}
3147
3148/*
3149 * pull_task - move a task from a remote runqueue to the local runqueue. 3183 * pull_task - move a task from a remote runqueue to the local runqueue.
3150 * Both runqueues must be locked. 3184 * Both runqueues must be locked.
3151 */ 3185 */
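sched_exec() now calls select_task_rq(SD_BALANCE_EXEC) itself instead of going through the removed sched_migrate_task() helper, and it restarts from the top if ->cpus_allowed changed between choosing dest_cpu and taking the runqueue lock. The shape is the usual compute-outside-the-lock, validate-under-the-lock, retry-on-race pattern; a stripped-down sketch of just that control flow, with names taken from the hunk and the migration-request tail omitted:

    again:
            this_cpu = get_cpu();
            dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
            if (dest_cpu == this_cpu) {
                    put_cpu();
                    return;
            }

            rq = task_rq_lock(p, &flags);
            put_cpu();
            if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) ||
                unlikely(!cpu_active(dest_cpu))) {
                    task_rq_unlock(rq, &flags);
                    goto again;        /* raced with an affinity change */
            }
            /* safe to queue the migration request for dest_cpu from here */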
@@ -4186,14 +4220,15 @@ redo:
4186 4220
4187 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { 4221 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
4188 4222
4189 spin_lock_irqsave(&busiest->lock, flags); 4223 raw_spin_lock_irqsave(&busiest->lock, flags);
4190 4224
4191 /* don't kick the migration_thread, if the curr 4225 /* don't kick the migration_thread, if the curr
4192 * task on busiest cpu can't be moved to this_cpu 4226 * task on busiest cpu can't be moved to this_cpu
4193 */ 4227 */
4194 if (!cpumask_test_cpu(this_cpu, 4228 if (!cpumask_test_cpu(this_cpu,
4195 &busiest->curr->cpus_allowed)) { 4229 &busiest->curr->cpus_allowed)) {
4196 spin_unlock_irqrestore(&busiest->lock, flags); 4230 raw_spin_unlock_irqrestore(&busiest->lock,
4231 flags);
4197 all_pinned = 1; 4232 all_pinned = 1;
4198 goto out_one_pinned; 4233 goto out_one_pinned;
4199 } 4234 }
@@ -4203,7 +4238,7 @@ redo:
4203 busiest->push_cpu = this_cpu; 4238 busiest->push_cpu = this_cpu;
4204 active_balance = 1; 4239 active_balance = 1;
4205 } 4240 }
4206 spin_unlock_irqrestore(&busiest->lock, flags); 4241 raw_spin_unlock_irqrestore(&busiest->lock, flags);
4207 if (active_balance) 4242 if (active_balance)
4208 wake_up_process(busiest->migration_thread); 4243 wake_up_process(busiest->migration_thread);
4209 4244
@@ -4385,10 +4420,10 @@ redo:
4385 /* 4420 /*
4386 * Should not call ttwu while holding a rq->lock 4421 * Should not call ttwu while holding a rq->lock
4387 */ 4422 */
4388 spin_unlock(&this_rq->lock); 4423 raw_spin_unlock(&this_rq->lock);
4389 if (active_balance) 4424 if (active_balance)
4390 wake_up_process(busiest->migration_thread); 4425 wake_up_process(busiest->migration_thread);
4391 spin_lock(&this_rq->lock); 4426 raw_spin_lock(&this_rq->lock);
4392 4427
4393 } else 4428 } else
4394 sd->nr_balance_failed = 0; 4429 sd->nr_balance_failed = 0;
@@ -5257,11 +5292,11 @@ void scheduler_tick(void)
5257 5292
5258 sched_clock_tick(); 5293 sched_clock_tick();
5259 5294
5260 spin_lock(&rq->lock); 5295 raw_spin_lock(&rq->lock);
5261 update_rq_clock(rq); 5296 update_rq_clock(rq);
5262 update_cpu_load(rq); 5297 update_cpu_load(rq);
5263 curr->sched_class->task_tick(rq, curr, 0); 5298 curr->sched_class->task_tick(rq, curr, 0);
5264 spin_unlock(&rq->lock); 5299 raw_spin_unlock(&rq->lock);
5265 5300
5266 perf_event_task_tick(curr, cpu); 5301 perf_event_task_tick(curr, cpu);
5267 5302
@@ -5455,7 +5490,7 @@ need_resched_nonpreemptible:
5455 if (sched_feat(HRTICK)) 5490 if (sched_feat(HRTICK))
5456 hrtick_clear(rq); 5491 hrtick_clear(rq);
5457 5492
5458 spin_lock_irq(&rq->lock); 5493 raw_spin_lock_irq(&rq->lock);
5459 update_rq_clock(rq); 5494 update_rq_clock(rq);
5460 clear_tsk_need_resched(prev); 5495 clear_tsk_need_resched(prev);
5461 5496
@@ -5491,12 +5526,15 @@ need_resched_nonpreemptible:
5491 cpu = smp_processor_id(); 5526 cpu = smp_processor_id();
5492 rq = cpu_rq(cpu); 5527 rq = cpu_rq(cpu);
5493 } else 5528 } else
5494 spin_unlock_irq(&rq->lock); 5529 raw_spin_unlock_irq(&rq->lock);
5495 5530
5496 post_schedule(rq); 5531 post_schedule(rq);
5497 5532
5498 if (unlikely(reacquire_kernel_lock(current) < 0)) 5533 if (unlikely(reacquire_kernel_lock(current) < 0)) {
5534 prev = rq->curr;
5535 switch_count = &prev->nivcsw;
5499 goto need_resched_nonpreemptible; 5536 goto need_resched_nonpreemptible;
5537 }
5500 5538
5501 preempt_enable_no_resched(); 5539 preempt_enable_no_resched();
5502 if (need_resched()) 5540 if (need_resched())
@@ -5908,14 +5946,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
5908 */ 5946 */
5909bool try_wait_for_completion(struct completion *x) 5947bool try_wait_for_completion(struct completion *x)
5910{ 5948{
5949 unsigned long flags;
5911 int ret = 1; 5950 int ret = 1;
5912 5951
5913 spin_lock_irq(&x->wait.lock); 5952 spin_lock_irqsave(&x->wait.lock, flags);
5914 if (!x->done) 5953 if (!x->done)
5915 ret = 0; 5954 ret = 0;
5916 else 5955 else
5917 x->done--; 5956 x->done--;
5918 spin_unlock_irq(&x->wait.lock); 5957 spin_unlock_irqrestore(&x->wait.lock, flags);
5919 return ret; 5958 return ret;
5920} 5959}
5921EXPORT_SYMBOL(try_wait_for_completion); 5960EXPORT_SYMBOL(try_wait_for_completion);
@@ -5930,12 +5969,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
5930 */ 5969 */
5931bool completion_done(struct completion *x) 5970bool completion_done(struct completion *x)
5932{ 5971{
5972 unsigned long flags;
5933 int ret = 1; 5973 int ret = 1;
5934 5974
5935 spin_lock_irq(&x->wait.lock); 5975 spin_lock_irqsave(&x->wait.lock, flags);
5936 if (!x->done) 5976 if (!x->done)
5937 ret = 0; 5977 ret = 0;
5938 spin_unlock_irq(&x->wait.lock); 5978 spin_unlock_irqrestore(&x->wait.lock, flags);
5939 return ret; 5979 return ret;
5940} 5980}
5941EXPORT_SYMBOL(completion_done); 5981EXPORT_SYMBOL(completion_done);
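The two completion hunks switch from spin_lock_irq() to spin_lock_irqsave() so that try_wait_for_completion() and completion_done() can be called with interrupts already disabled without re-enabling them behind the caller's back. A small illustration of the caller-side difference, using a hypothetical completion object:

    static DECLARE_COMPLETION(done_example);      /* hypothetical, not in the patch */

    static void poll_with_irqs_off(void)
    {
            unsigned long flags;

            local_irq_save(flags);
            /*
             * Safe after this change: the helper saves and restores the IRQ
             * state instead of unconditionally re-enabling interrupts on exit.
             */
            if (try_wait_for_completion(&done_example))
                    ;       /* consume the completion */
            local_irq_restore(flags);
    }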
@@ -6320,7 +6360,7 @@ recheck:
6320 * make sure no PI-waiters arrive (or leave) while we are 6360 * make sure no PI-waiters arrive (or leave) while we are
6321 * changing the priority of the task: 6361 * changing the priority of the task:
6322 */ 6362 */
6323 spin_lock_irqsave(&p->pi_lock, flags); 6363 raw_spin_lock_irqsave(&p->pi_lock, flags);
6324 /* 6364 /*
6325 * To be able to change p->policy safely, the apropriate 6365 * To be able to change p->policy safely, the apropriate
6326 * runqueue lock must be held. 6366 * runqueue lock must be held.
@@ -6330,7 +6370,7 @@ recheck:
6330 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 6370 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
6331 policy = oldpolicy = -1; 6371 policy = oldpolicy = -1;
6332 __task_rq_unlock(rq); 6372 __task_rq_unlock(rq);
6333 spin_unlock_irqrestore(&p->pi_lock, flags); 6373 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6334 goto recheck; 6374 goto recheck;
6335 } 6375 }
6336 update_rq_clock(rq); 6376 update_rq_clock(rq);
@@ -6354,7 +6394,7 @@ recheck:
6354 check_class_changed(rq, p, prev_class, oldprio, running); 6394 check_class_changed(rq, p, prev_class, oldprio, running);
6355 } 6395 }
6356 __task_rq_unlock(rq); 6396 __task_rq_unlock(rq);
6357 spin_unlock_irqrestore(&p->pi_lock, flags); 6397 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6358 6398
6359 rt_mutex_adjust_pi(p); 6399 rt_mutex_adjust_pi(p);
6360 6400
@@ -6454,7 +6494,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6454 return -EINVAL; 6494 return -EINVAL;
6455 6495
6456 retval = -ESRCH; 6496 retval = -ESRCH;
6457 read_lock(&tasklist_lock); 6497 rcu_read_lock();
6458 p = find_process_by_pid(pid); 6498 p = find_process_by_pid(pid);
6459 if (p) { 6499 if (p) {
6460 retval = security_task_getscheduler(p); 6500 retval = security_task_getscheduler(p);
@@ -6462,7 +6502,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6462 retval = p->policy 6502 retval = p->policy
6463 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 6503 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
6464 } 6504 }
6465 read_unlock(&tasklist_lock); 6505 rcu_read_unlock();
6466 return retval; 6506 return retval;
6467} 6507}
6468 6508
@@ -6480,7 +6520,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6480 if (!param || pid < 0) 6520 if (!param || pid < 0)
6481 return -EINVAL; 6521 return -EINVAL;
6482 6522
6483 read_lock(&tasklist_lock); 6523 rcu_read_lock();
6484 p = find_process_by_pid(pid); 6524 p = find_process_by_pid(pid);
6485 retval = -ESRCH; 6525 retval = -ESRCH;
6486 if (!p) 6526 if (!p)
@@ -6491,7 +6531,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6491 goto out_unlock; 6531 goto out_unlock;
6492 6532
6493 lp.sched_priority = p->rt_priority; 6533 lp.sched_priority = p->rt_priority;
6494 read_unlock(&tasklist_lock); 6534 rcu_read_unlock();
6495 6535
6496 /* 6536 /*
6497 * This one might sleep, we cannot do it with a spinlock held ... 6537 * This one might sleep, we cannot do it with a spinlock held ...
@@ -6501,7 +6541,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6501 return retval; 6541 return retval;
6502 6542
6503out_unlock: 6543out_unlock:
6504 read_unlock(&tasklist_lock); 6544 rcu_read_unlock();
6505 return retval; 6545 return retval;
6506} 6546}
6507 6547
@@ -6512,22 +6552,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
6512 int retval; 6552 int retval;
6513 6553
6514 get_online_cpus(); 6554 get_online_cpus();
6515 read_lock(&tasklist_lock); 6555 rcu_read_lock();
6516 6556
6517 p = find_process_by_pid(pid); 6557 p = find_process_by_pid(pid);
6518 if (!p) { 6558 if (!p) {
6519 read_unlock(&tasklist_lock); 6559 rcu_read_unlock();
6520 put_online_cpus(); 6560 put_online_cpus();
6521 return -ESRCH; 6561 return -ESRCH;
6522 } 6562 }
6523 6563
6524 /* 6564 /* Prevent p going away */
6525 * It is not safe to call set_cpus_allowed with the
6526 * tasklist_lock held. We will bump the task_struct's
6527 * usage count and then drop tasklist_lock.
6528 */
6529 get_task_struct(p); 6565 get_task_struct(p);
6530 read_unlock(&tasklist_lock); 6566 rcu_read_unlock();
6531 6567
6532 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 6568 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
6533 retval = -ENOMEM; 6569 retval = -ENOMEM;
@@ -6613,7 +6649,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
6613 int retval; 6649 int retval;
6614 6650
6615 get_online_cpus(); 6651 get_online_cpus();
6616 read_lock(&tasklist_lock); 6652 rcu_read_lock();
6617 6653
6618 retval = -ESRCH; 6654 retval = -ESRCH;
6619 p = find_process_by_pid(pid); 6655 p = find_process_by_pid(pid);
@@ -6629,7 +6665,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
6629 task_rq_unlock(rq, &flags); 6665 task_rq_unlock(rq, &flags);
6630 6666
6631out_unlock: 6667out_unlock:
6632 read_unlock(&tasklist_lock); 6668 rcu_read_unlock();
6633 put_online_cpus(); 6669 put_online_cpus();
6634 6670
6635 return retval; 6671 return retval;
@@ -6684,7 +6720,7 @@ SYSCALL_DEFINE0(sched_yield)
6684 */ 6720 */
6685 __release(rq->lock); 6721 __release(rq->lock);
6686 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 6722 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
6687 _raw_spin_unlock(&rq->lock); 6723 do_raw_spin_unlock(&rq->lock);
6688 preempt_enable_no_resched(); 6724 preempt_enable_no_resched();
6689 6725
6690 schedule(); 6726 schedule();
@@ -6873,7 +6909,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6873 return -EINVAL; 6909 return -EINVAL;
6874 6910
6875 retval = -ESRCH; 6911 retval = -ESRCH;
6876 read_lock(&tasklist_lock); 6912 rcu_read_lock();
6877 p = find_process_by_pid(pid); 6913 p = find_process_by_pid(pid);
6878 if (!p) 6914 if (!p)
6879 goto out_unlock; 6915 goto out_unlock;
@@ -6886,13 +6922,13 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6886 time_slice = p->sched_class->get_rr_interval(rq, p); 6922 time_slice = p->sched_class->get_rr_interval(rq, p);
6887 task_rq_unlock(rq, &flags); 6923 task_rq_unlock(rq, &flags);
6888 6924
6889 read_unlock(&tasklist_lock); 6925 rcu_read_unlock();
6890 jiffies_to_timespec(time_slice, &t); 6926 jiffies_to_timespec(time_slice, &t);
6891 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 6927 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
6892 return retval; 6928 return retval;
6893 6929
6894out_unlock: 6930out_unlock:
6895 read_unlock(&tasklist_lock); 6931 rcu_read_unlock();
6896 return retval; 6932 return retval;
6897} 6933}
6898 6934
@@ -6980,9 +7016,10 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6980 struct rq *rq = cpu_rq(cpu); 7016 struct rq *rq = cpu_rq(cpu);
6981 unsigned long flags; 7017 unsigned long flags;
6982 7018
6983 spin_lock_irqsave(&rq->lock, flags); 7019 raw_spin_lock_irqsave(&rq->lock, flags);
6984 7020
6985 __sched_fork(idle); 7021 __sched_fork(idle);
7022 idle->state = TASK_RUNNING;
6986 idle->se.exec_start = sched_clock(); 7023 idle->se.exec_start = sched_clock();
6987 7024
6988 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); 7025 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
@@ -6992,7 +7029,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6992#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 7029#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
6993 idle->oncpu = 1; 7030 idle->oncpu = 1;
6994#endif 7031#endif
6995 spin_unlock_irqrestore(&rq->lock, flags); 7032 raw_spin_unlock_irqrestore(&rq->lock, flags);
6996 7033
6997 /* Set the preempt count _outside_ the spinlocks! */ 7034 /* Set the preempt count _outside_ the spinlocks! */
6998#if defined(CONFIG_PREEMPT) 7035#if defined(CONFIG_PREEMPT)
@@ -7097,7 +7134,23 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
7097 struct rq *rq; 7134 struct rq *rq;
7098 int ret = 0; 7135 int ret = 0;
7099 7136
7137 /*
7138 * Since we rely on wake-ups to migrate sleeping tasks, don't change
7139 * the ->cpus_allowed mask from under waking tasks, which would be
7140 * possible when we change rq->lock in ttwu(), so synchronize against
7141 * TASK_WAKING to avoid that.
7142 */
7143again:
7144 while (p->state == TASK_WAKING)
7145 cpu_relax();
7146
7100 rq = task_rq_lock(p, &flags); 7147 rq = task_rq_lock(p, &flags);
7148
7149 if (p->state == TASK_WAKING) {
7150 task_rq_unlock(rq, &flags);
7151 goto again;
7152 }
7153
7101 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 7154 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
7102 ret = -EINVAL; 7155 ret = -EINVAL;
7103 goto out; 7156 goto out;
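The loop added to set_cpus_allowed_ptr() is a wait-then-recheck against the transient TASK_WAKING state: spin until the task leaves TASK_WAKING, take the rq lock, then re-check because the task can start waking again in the window. The idiom in isolation, condensed from the hunk above:

    again:
            while (p->state == TASK_WAKING)
                    cpu_relax();

            rq = task_rq_lock(p, &flags);
            if (p->state == TASK_WAKING) {
                    task_rq_unlock(rq, &flags);
                    goto again;
            }
            /* ->cpus_allowed can now be changed safely under rq->lock */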
@@ -7153,7 +7206,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
7153static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 7206static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
7154{ 7207{
7155 struct rq *rq_dest, *rq_src; 7208 struct rq *rq_dest, *rq_src;
7156 int ret = 0, on_rq; 7209 int ret = 0;
7157 7210
7158 if (unlikely(!cpu_active(dest_cpu))) 7211 if (unlikely(!cpu_active(dest_cpu)))
7159 return ret; 7212 return ret;
@@ -7169,12 +7222,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
7169 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 7222 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
7170 goto fail; 7223 goto fail;
7171 7224
7172 on_rq = p->se.on_rq; 7225 /*
7173 if (on_rq) 7226 * If we're not on a rq, the next wake-up will ensure we're
7227 * placed properly.
7228 */
7229 if (p->se.on_rq) {
7174 deactivate_task(rq_src, p, 0); 7230 deactivate_task(rq_src, p, 0);
7175 7231 set_task_cpu(p, dest_cpu);
7176 set_task_cpu(p, dest_cpu);
7177 if (on_rq) {
7178 activate_task(rq_dest, p, 0); 7232 activate_task(rq_dest, p, 0);
7179 check_preempt_curr(rq_dest, p, 0); 7233 check_preempt_curr(rq_dest, p, 0);
7180 } 7234 }
@@ -7209,10 +7263,10 @@ static int migration_thread(void *data)
7209 struct migration_req *req; 7263 struct migration_req *req;
7210 struct list_head *head; 7264 struct list_head *head;
7211 7265
7212 spin_lock_irq(&rq->lock); 7266 raw_spin_lock_irq(&rq->lock);
7213 7267
7214 if (cpu_is_offline(cpu)) { 7268 if (cpu_is_offline(cpu)) {
7215 spin_unlock_irq(&rq->lock); 7269 raw_spin_unlock_irq(&rq->lock);
7216 break; 7270 break;
7217 } 7271 }
7218 7272
@@ -7224,7 +7278,7 @@ static int migration_thread(void *data)
7224 head = &rq->migration_queue; 7278 head = &rq->migration_queue;
7225 7279
7226 if (list_empty(head)) { 7280 if (list_empty(head)) {
7227 spin_unlock_irq(&rq->lock); 7281 raw_spin_unlock_irq(&rq->lock);
7228 schedule(); 7282 schedule();
7229 set_current_state(TASK_INTERRUPTIBLE); 7283 set_current_state(TASK_INTERRUPTIBLE);
7230 continue; 7284 continue;
@@ -7233,14 +7287,14 @@ static int migration_thread(void *data)
7233 list_del_init(head->next); 7287 list_del_init(head->next);
7234 7288
7235 if (req->task != NULL) { 7289 if (req->task != NULL) {
7236 spin_unlock(&rq->lock); 7290 raw_spin_unlock(&rq->lock);
7237 __migrate_task(req->task, cpu, req->dest_cpu); 7291 __migrate_task(req->task, cpu, req->dest_cpu);
7238 } else if (likely(cpu == (badcpu = smp_processor_id()))) { 7292 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
7239 req->dest_cpu = RCU_MIGRATION_GOT_QS; 7293 req->dest_cpu = RCU_MIGRATION_GOT_QS;
7240 spin_unlock(&rq->lock); 7294 raw_spin_unlock(&rq->lock);
7241 } else { 7295 } else {
7242 req->dest_cpu = RCU_MIGRATION_MUST_SYNC; 7296 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
7243 spin_unlock(&rq->lock); 7297 raw_spin_unlock(&rq->lock);
7244 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); 7298 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
7245 } 7299 }
7246 local_irq_enable(); 7300 local_irq_enable();
@@ -7270,37 +7324,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
7270static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 7324static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
7271{ 7325{
7272 int dest_cpu; 7326 int dest_cpu;
7273 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
7274 7327
7275again: 7328again:
7276 /* Look for allowed, online CPU in same node. */ 7329 dest_cpu = select_fallback_rq(dead_cpu, p);
7277 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
7278 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
7279 goto move;
7280
7281 /* Any allowed, online CPU? */
7282 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
7283 if (dest_cpu < nr_cpu_ids)
7284 goto move;
7285
7286 /* No more Mr. Nice Guy. */
7287 if (dest_cpu >= nr_cpu_ids) {
7288 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
7289 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
7290
7291 /*
7292 * Don't tell them about moving exiting tasks or
7293 * kernel threads (both mm NULL), since they never
7294 * leave kernel.
7295 */
7296 if (p->mm && printk_ratelimit()) {
7297 printk(KERN_INFO "process %d (%s) no "
7298 "longer affine to cpu%d\n",
7299 task_pid_nr(p), p->comm, dead_cpu);
7300 }
7301 }
7302 7330
7303move:
7304 /* It can have affinity changed while we were choosing. */ 7331 /* It can have affinity changed while we were choosing. */
7305 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) 7332 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
7306 goto again; 7333 goto again;
@@ -7363,14 +7390,14 @@ void sched_idle_next(void)
7363 * Strictly not necessary since rest of the CPUs are stopped by now 7390 * Strictly not necessary since rest of the CPUs are stopped by now
7364 * and interrupts disabled on the current cpu. 7391 * and interrupts disabled on the current cpu.
7365 */ 7392 */
7366 spin_lock_irqsave(&rq->lock, flags); 7393 raw_spin_lock_irqsave(&rq->lock, flags);
7367 7394
7368 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); 7395 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
7369 7396
7370 update_rq_clock(rq); 7397 update_rq_clock(rq);
7371 activate_task(rq, p, 0); 7398 activate_task(rq, p, 0);
7372 7399
7373 spin_unlock_irqrestore(&rq->lock, flags); 7400 raw_spin_unlock_irqrestore(&rq->lock, flags);
7374} 7401}
7375 7402
7376/* 7403/*
@@ -7406,9 +7433,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
7406 * that's OK. No task can be added to this CPU, so iteration is 7433 * that's OK. No task can be added to this CPU, so iteration is
7407 * fine. 7434 * fine.
7408 */ 7435 */
7409 spin_unlock_irq(&rq->lock); 7436 raw_spin_unlock_irq(&rq->lock);
7410 move_task_off_dead_cpu(dead_cpu, p); 7437 move_task_off_dead_cpu(dead_cpu, p);
7411 spin_lock_irq(&rq->lock); 7438 raw_spin_lock_irq(&rq->lock);
7412 7439
7413 put_task_struct(p); 7440 put_task_struct(p);
7414} 7441}
@@ -7674,13 +7701,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7674 7701
7675 /* Update our root-domain */ 7702 /* Update our root-domain */
7676 rq = cpu_rq(cpu); 7703 rq = cpu_rq(cpu);
7677 spin_lock_irqsave(&rq->lock, flags); 7704 raw_spin_lock_irqsave(&rq->lock, flags);
7678 if (rq->rd) { 7705 if (rq->rd) {
7679 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7706 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7680 7707
7681 set_rq_online(rq); 7708 set_rq_online(rq);
7682 } 7709 }
7683 spin_unlock_irqrestore(&rq->lock, flags); 7710 raw_spin_unlock_irqrestore(&rq->lock, flags);
7684 break; 7711 break;
7685 7712
7686#ifdef CONFIG_HOTPLUG_CPU 7713#ifdef CONFIG_HOTPLUG_CPU
@@ -7705,13 +7732,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7705 put_task_struct(rq->migration_thread); 7732 put_task_struct(rq->migration_thread);
7706 rq->migration_thread = NULL; 7733 rq->migration_thread = NULL;
7707 /* Idle task back to normal (off runqueue, low prio) */ 7734 /* Idle task back to normal (off runqueue, low prio) */
7708 spin_lock_irq(&rq->lock); 7735 raw_spin_lock_irq(&rq->lock);
7709 update_rq_clock(rq); 7736 update_rq_clock(rq);
7710 deactivate_task(rq, rq->idle, 0); 7737 deactivate_task(rq, rq->idle, 0);
7711 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); 7738 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
7712 rq->idle->sched_class = &idle_sched_class; 7739 rq->idle->sched_class = &idle_sched_class;
7713 migrate_dead_tasks(cpu); 7740 migrate_dead_tasks(cpu);
7714 spin_unlock_irq(&rq->lock); 7741 raw_spin_unlock_irq(&rq->lock);
7715 cpuset_unlock(); 7742 cpuset_unlock();
7716 migrate_nr_uninterruptible(rq); 7743 migrate_nr_uninterruptible(rq);
7717 BUG_ON(rq->nr_running != 0); 7744 BUG_ON(rq->nr_running != 0);
@@ -7721,30 +7748,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7721 * they didn't take sched_hotcpu_mutex. Just wake up 7748 * they didn't take sched_hotcpu_mutex. Just wake up
7722 * the requestors. 7749 * the requestors.
7723 */ 7750 */
7724 spin_lock_irq(&rq->lock); 7751 raw_spin_lock_irq(&rq->lock);
7725 while (!list_empty(&rq->migration_queue)) { 7752 while (!list_empty(&rq->migration_queue)) {
7726 struct migration_req *req; 7753 struct migration_req *req;
7727 7754
7728 req = list_entry(rq->migration_queue.next, 7755 req = list_entry(rq->migration_queue.next,
7729 struct migration_req, list); 7756 struct migration_req, list);
7730 list_del_init(&req->list); 7757 list_del_init(&req->list);
7731 spin_unlock_irq(&rq->lock); 7758 raw_spin_unlock_irq(&rq->lock);
7732 complete(&req->done); 7759 complete(&req->done);
7733 spin_lock_irq(&rq->lock); 7760 raw_spin_lock_irq(&rq->lock);
7734 } 7761 }
7735 spin_unlock_irq(&rq->lock); 7762 raw_spin_unlock_irq(&rq->lock);
7736 break; 7763 break;
7737 7764
7738 case CPU_DYING: 7765 case CPU_DYING:
7739 case CPU_DYING_FROZEN: 7766 case CPU_DYING_FROZEN:
7740 /* Update our root-domain */ 7767 /* Update our root-domain */
7741 rq = cpu_rq(cpu); 7768 rq = cpu_rq(cpu);
7742 spin_lock_irqsave(&rq->lock, flags); 7769 raw_spin_lock_irqsave(&rq->lock, flags);
7743 if (rq->rd) { 7770 if (rq->rd) {
7744 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7771 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7745 set_rq_offline(rq); 7772 set_rq_offline(rq);
7746 } 7773 }
7747 spin_unlock_irqrestore(&rq->lock, flags); 7774 raw_spin_unlock_irqrestore(&rq->lock, flags);
7748 break; 7775 break;
7749#endif 7776#endif
7750 } 7777 }
@@ -7974,7 +8001,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7974 struct root_domain *old_rd = NULL; 8001 struct root_domain *old_rd = NULL;
7975 unsigned long flags; 8002 unsigned long flags;
7976 8003
7977 spin_lock_irqsave(&rq->lock, flags); 8004 raw_spin_lock_irqsave(&rq->lock, flags);
7978 8005
7979 if (rq->rd) { 8006 if (rq->rd) {
7980 old_rd = rq->rd; 8007 old_rd = rq->rd;
@@ -8000,7 +8027,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
8000 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 8027 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
8001 set_rq_online(rq); 8028 set_rq_online(rq);
8002 8029
8003 spin_unlock_irqrestore(&rq->lock, flags); 8030 raw_spin_unlock_irqrestore(&rq->lock, flags);
8004 8031
8005 if (old_rd) 8032 if (old_rd)
8006 free_rootdomain(old_rd); 8033 free_rootdomain(old_rd);
@@ -9357,13 +9384,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
9357#ifdef CONFIG_SMP 9384#ifdef CONFIG_SMP
9358 rt_rq->rt_nr_migratory = 0; 9385 rt_rq->rt_nr_migratory = 0;
9359 rt_rq->overloaded = 0; 9386 rt_rq->overloaded = 0;
9360 plist_head_init(&rt_rq->pushable_tasks, &rq->lock); 9387 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
9361#endif 9388#endif
9362 9389
9363 rt_rq->rt_time = 0; 9390 rt_rq->rt_time = 0;
9364 rt_rq->rt_throttled = 0; 9391 rt_rq->rt_throttled = 0;
9365 rt_rq->rt_runtime = 0; 9392 rt_rq->rt_runtime = 0;
9366 spin_lock_init(&rt_rq->rt_runtime_lock); 9393 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
9367 9394
9368#ifdef CONFIG_RT_GROUP_SCHED 9395#ifdef CONFIG_RT_GROUP_SCHED
9369 rt_rq->rt_nr_boosted = 0; 9396 rt_rq->rt_nr_boosted = 0;
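plist_head_init_raw() is the raw-lock counterpart used because rq->lock is now a raw_spinlock_t; it associates the priority list with a raw lock for lockdep. A minimal sketch, assuming the helper keeps the (head, lock) shape of plist_head_init(); the names below are illustrative only:

    struct plist_head pushable;        /* hypothetical list */
    raw_spinlock_t    pushable_lock;   /* raw lock guarding it */

    raw_spin_lock_init(&pushable_lock);
    plist_head_init_raw(&pushable, &pushable_lock);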
@@ -9523,7 +9550,7 @@ void __init sched_init(void)
9523 struct rq *rq; 9550 struct rq *rq;
9524 9551
9525 rq = cpu_rq(i); 9552 rq = cpu_rq(i);
9526 spin_lock_init(&rq->lock); 9553 raw_spin_lock_init(&rq->lock);
9527 rq->nr_running = 0; 9554 rq->nr_running = 0;
9528 rq->calc_load_active = 0; 9555 rq->calc_load_active = 0;
9529 rq->calc_load_update = jiffies + LOAD_FREQ; 9556 rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9621,7 +9648,7 @@ void __init sched_init(void)
9621#endif 9648#endif
9622 9649
9623#ifdef CONFIG_RT_MUTEXES 9650#ifdef CONFIG_RT_MUTEXES
9624 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); 9651 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
9625#endif 9652#endif
9626 9653
9627 /* 9654 /*
@@ -9665,7 +9692,7 @@ void __init sched_init(void)
9665#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 9692#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
9666static inline int preempt_count_equals(int preempt_offset) 9693static inline int preempt_count_equals(int preempt_offset)
9667{ 9694{
9668 int nested = preempt_count() & ~PREEMPT_ACTIVE; 9695 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
9669 9696
9670 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); 9697 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
9671} 9698}
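Folding rcu_preempt_depth() into preempt_count_equals() makes the sleep-in-atomic debug check aware of preemptible-RCU read-side critical sections, which do not touch preempt_count(). With preemptible RCU configured, code like the following can now trip the might_sleep() warning even though preemption is nominally still enabled:

    rcu_read_lock();
    might_sleep();          /* now flagged: rcu_preempt_depth() == 1 */
    rcu_read_unlock();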
@@ -9746,13 +9773,13 @@ void normalize_rt_tasks(void)
9746 continue; 9773 continue;
9747 } 9774 }
9748 9775
9749 spin_lock(&p->pi_lock); 9776 raw_spin_lock(&p->pi_lock);
9750 rq = __task_rq_lock(p); 9777 rq = __task_rq_lock(p);
9751 9778
9752 normalize_task(rq, p); 9779 normalize_task(rq, p);
9753 9780
9754 __task_rq_unlock(rq); 9781 __task_rq_unlock(rq);
9755 spin_unlock(&p->pi_lock); 9782 raw_spin_unlock(&p->pi_lock);
9756 } while_each_thread(g, p); 9783 } while_each_thread(g, p);
9757 9784
9758 read_unlock_irqrestore(&tasklist_lock, flags); 9785 read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10080,7 +10107,7 @@ void sched_move_task(struct task_struct *tsk)
10080 10107
10081#ifdef CONFIG_FAIR_GROUP_SCHED 10108#ifdef CONFIG_FAIR_GROUP_SCHED
10082 if (tsk->sched_class->moved_group) 10109 if (tsk->sched_class->moved_group)
10083 tsk->sched_class->moved_group(tsk); 10110 tsk->sched_class->moved_group(tsk, on_rq);
10084#endif 10111#endif
10085 10112
10086 if (unlikely(running)) 10113 if (unlikely(running))
@@ -10115,9 +10142,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
10115 struct rq *rq = cfs_rq->rq; 10142 struct rq *rq = cfs_rq->rq;
10116 unsigned long flags; 10143 unsigned long flags;
10117 10144
10118 spin_lock_irqsave(&rq->lock, flags); 10145 raw_spin_lock_irqsave(&rq->lock, flags);
10119 __set_se_shares(se, shares); 10146 __set_se_shares(se, shares);
10120 spin_unlock_irqrestore(&rq->lock, flags); 10147 raw_spin_unlock_irqrestore(&rq->lock, flags);
10121} 10148}
10122 10149
10123static DEFINE_MUTEX(shares_mutex); 10150static DEFINE_MUTEX(shares_mutex);
@@ -10302,18 +10329,18 @@ static int tg_set_bandwidth(struct task_group *tg,
10302 if (err) 10329 if (err)
10303 goto unlock; 10330 goto unlock;
10304 10331
10305 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10332 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10306 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 10333 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
10307 tg->rt_bandwidth.rt_runtime = rt_runtime; 10334 tg->rt_bandwidth.rt_runtime = rt_runtime;
10308 10335
10309 for_each_possible_cpu(i) { 10336 for_each_possible_cpu(i) {
10310 struct rt_rq *rt_rq = tg->rt_rq[i]; 10337 struct rt_rq *rt_rq = tg->rt_rq[i];
10311 10338
10312 spin_lock(&rt_rq->rt_runtime_lock); 10339 raw_spin_lock(&rt_rq->rt_runtime_lock);
10313 rt_rq->rt_runtime = rt_runtime; 10340 rt_rq->rt_runtime = rt_runtime;
10314 spin_unlock(&rt_rq->rt_runtime_lock); 10341 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10315 } 10342 }
10316 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10343 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10317 unlock: 10344 unlock:
10318 read_unlock(&tasklist_lock); 10345 read_unlock(&tasklist_lock);
10319 mutex_unlock(&rt_constraints_mutex); 10346 mutex_unlock(&rt_constraints_mutex);
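tg_set_bandwidth() also shows the lock nesting after the conversion: the group-wide rt_bandwidth.rt_runtime_lock is taken with interrupts disabled, and each per-cpu rt_rq->rt_runtime_lock nests inside it. Condensed from the hunk above, for readability:

    raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);     /* outer */
    for_each_possible_cpu(i) {
            struct rt_rq *rt_rq = tg->rt_rq[i];

            raw_spin_lock(&rt_rq->rt_runtime_lock);            /* inner, per cpu */
            rt_rq->rt_runtime = rt_runtime;
            raw_spin_unlock(&rt_rq->rt_runtime_lock);
    }
    raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);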
@@ -10418,15 +10445,15 @@ static int sched_rt_global_constraints(void)
10418 if (sysctl_sched_rt_runtime == 0) 10445 if (sysctl_sched_rt_runtime == 0)
10419 return -EBUSY; 10446 return -EBUSY;
10420 10447
10421 spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 10448 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
10422 for_each_possible_cpu(i) { 10449 for_each_possible_cpu(i) {
10423 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 10450 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
10424 10451
10425 spin_lock(&rt_rq->rt_runtime_lock); 10452 raw_spin_lock(&rt_rq->rt_runtime_lock);
10426 rt_rq->rt_runtime = global_rt_runtime(); 10453 rt_rq->rt_runtime = global_rt_runtime();
10427 spin_unlock(&rt_rq->rt_runtime_lock); 10454 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10428 } 10455 }
10429 spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 10456 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
10430 10457
10431 return 0; 10458 return 0;
10432} 10459}
@@ -10717,9 +10744,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
10717 /* 10744 /*
10718 * Take rq->lock to make 64-bit read safe on 32-bit platforms. 10745 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
10719 */ 10746 */
10720 spin_lock_irq(&cpu_rq(cpu)->lock); 10747 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10721 data = *cpuusage; 10748 data = *cpuusage;
10722 spin_unlock_irq(&cpu_rq(cpu)->lock); 10749 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10723#else 10750#else
10724 data = *cpuusage; 10751 data = *cpuusage;
10725#endif 10752#endif
@@ -10735,9 +10762,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
10735 /* 10762 /*
10736 * Take rq->lock to make 64-bit write safe on 32-bit platforms. 10763 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
10737 */ 10764 */
10738 spin_lock_irq(&cpu_rq(cpu)->lock); 10765 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10739 *cpuusage = val; 10766 *cpuusage = val;
10740 spin_unlock_irq(&cpu_rq(cpu)->lock); 10767 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10741#else 10768#else
10742 *cpuusage = val; 10769 *cpuusage = val;
10743#endif 10770#endif
@@ -10971,9 +10998,9 @@ void synchronize_sched_expedited(void)
10971 init_completion(&req->done); 10998 init_completion(&req->done);
10972 req->task = NULL; 10999 req->task = NULL;
10973 req->dest_cpu = RCU_MIGRATION_NEED_QS; 11000 req->dest_cpu = RCU_MIGRATION_NEED_QS;
10974 spin_lock_irqsave(&rq->lock, flags); 11001 raw_spin_lock_irqsave(&rq->lock, flags);
10975 list_add(&req->list, &rq->migration_queue); 11002 list_add(&req->list, &rq->migration_queue);
10976 spin_unlock_irqrestore(&rq->lock, flags); 11003 raw_spin_unlock_irqrestore(&rq->lock, flags);
10977 wake_up_process(rq->migration_thread); 11004 wake_up_process(rq->migration_thread);
10978 } 11005 }
10979 for_each_online_cpu(cpu) { 11006 for_each_online_cpu(cpu) {
@@ -10981,11 +11008,11 @@ void synchronize_sched_expedited(void)
10981 req = &per_cpu(rcu_migration_req, cpu); 11008 req = &per_cpu(rcu_migration_req, cpu);
10982 rq = cpu_rq(cpu); 11009 rq = cpu_rq(cpu);
10983 wait_for_completion(&req->done); 11010 wait_for_completion(&req->done);
10984 spin_lock_irqsave(&rq->lock, flags); 11011 raw_spin_lock_irqsave(&rq->lock, flags);
10985 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) 11012 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
10986 need_full_sync = 1; 11013 need_full_sync = 1;
10987 req->dest_cpu = RCU_MIGRATION_IDLE; 11014 req->dest_cpu = RCU_MIGRATION_IDLE;
10988 spin_unlock_irqrestore(&rq->lock, flags); 11015 raw_spin_unlock_irqrestore(&rq->lock, flags);
10989 } 11016 }
10990 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; 11017 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10991 synchronize_sched_expedited_count++; 11018 synchronize_sched_expedited_count++;