Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  747
1 files changed, 391 insertions, 356 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index e7f2cfa6a257..87f1f47beffe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
141 141
142struct rt_bandwidth { 142struct rt_bandwidth {
143 /* nests inside the rq lock: */ 143 /* nests inside the rq lock: */
144 spinlock_t rt_runtime_lock; 144 raw_spinlock_t rt_runtime_lock;
145 ktime_t rt_period; 145 ktime_t rt_period;
146 u64 rt_runtime; 146 u64 rt_runtime;
147 struct hrtimer rt_period_timer; 147 struct hrtimer rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
178 rt_b->rt_period = ns_to_ktime(period); 178 rt_b->rt_period = ns_to_ktime(period);
179 rt_b->rt_runtime = runtime; 179 rt_b->rt_runtime = runtime;
180 180
181 spin_lock_init(&rt_b->rt_runtime_lock); 181 raw_spin_lock_init(&rt_b->rt_runtime_lock);
182 182
183 hrtimer_init(&rt_b->rt_period_timer, 183 hrtimer_init(&rt_b->rt_period_timer,
184 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 184 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
200 if (hrtimer_active(&rt_b->rt_period_timer)) 200 if (hrtimer_active(&rt_b->rt_period_timer))
201 return; 201 return;
202 202
203 spin_lock(&rt_b->rt_runtime_lock); 203 raw_spin_lock(&rt_b->rt_runtime_lock);
204 for (;;) { 204 for (;;) {
205 unsigned long delta; 205 unsigned long delta;
206 ktime_t soft, hard; 206 ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
217 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, 217 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
218 HRTIMER_MODE_ABS_PINNED, 0); 218 HRTIMER_MODE_ABS_PINNED, 0);
219 } 219 }
220 spin_unlock(&rt_b->rt_runtime_lock); 220 raw_spin_unlock(&rt_b->rt_runtime_lock);
221} 221}
222 222
223#ifdef CONFIG_RT_GROUP_SCHED 223#ifdef CONFIG_RT_GROUP_SCHED
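The hunks above switch rt_runtime_lock (and, further down, rq->lock) from spinlock_t to raw_spinlock_t while keeping the call sites one-for-one. As an illustrative aside, not taken from this patch: the raw_ API mirrors the plain spinlock API, and the conversion marks locks that must keep busy-waiting even in configurations where spinlock_t can become a sleeping lock (such as PREEMPT_RT). A minimal sketch of the pattern, with a hypothetical lock name:

	static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical lock, for illustration only */

	static void example_update(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_lock, flags);
		/* short, non-sleeping critical section */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}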
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
298 298
299#ifdef CONFIG_RT_GROUP_SCHED 299#ifdef CONFIG_RT_GROUP_SCHED
300static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); 300static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
301static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq); 301static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
302#endif /* CONFIG_RT_GROUP_SCHED */ 302#endif /* CONFIG_RT_GROUP_SCHED */
303#else /* !CONFIG_USER_SCHED */ 303#else /* !CONFIG_USER_SCHED */
304#define root_task_group init_task_group 304#define root_task_group init_task_group
@@ -470,7 +470,7 @@ struct rt_rq {
470 u64 rt_time; 470 u64 rt_time;
471 u64 rt_runtime; 471 u64 rt_runtime;
472 /* Nests inside the rq lock: */ 472 /* Nests inside the rq lock: */
473 spinlock_t rt_runtime_lock; 473 raw_spinlock_t rt_runtime_lock;
474 474
475#ifdef CONFIG_RT_GROUP_SCHED 475#ifdef CONFIG_RT_GROUP_SCHED
476 unsigned long rt_nr_boosted; 476 unsigned long rt_nr_boosted;
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain;
525 */ 525 */
526struct rq { 526struct rq {
527 /* runqueue lock: */ 527 /* runqueue lock: */
528 spinlock_t lock; 528 raw_spinlock_t lock;
529 529
530 /* 530 /*
531 * nr_running and cpu_load should be in the same cacheline because 531 * nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
685 */ 685 */
686int runqueue_is_locked(int cpu) 686int runqueue_is_locked(int cpu)
687{ 687{
688 return spin_is_locked(&cpu_rq(cpu)->lock); 688 return raw_spin_is_locked(&cpu_rq(cpu)->lock);
689} 689}
690 690
691/* 691/*
@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
814 * default: 0.25ms 814 * default: 0.25ms
815 */ 815 */
816unsigned int sysctl_sched_shares_ratelimit = 250000; 816unsigned int sysctl_sched_shares_ratelimit = 250000;
817unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
817 818
818/* 819/*
819 * Inject some fuzzyness into changing the per-cpu group shares 820 * Inject some fuzzyness into changing the per-cpu group shares
@@ -892,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
892 */ 893 */
893 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 894 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
894 895
895 spin_unlock_irq(&rq->lock); 896 raw_spin_unlock_irq(&rq->lock);
896} 897}
897 898
898#else /* __ARCH_WANT_UNLOCKED_CTXSW */ 899#else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -916,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
916 next->oncpu = 1; 917 next->oncpu = 1;
917#endif 918#endif
918#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 919#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
919 spin_unlock_irq(&rq->lock); 920 raw_spin_unlock_irq(&rq->lock);
920#else 921#else
921 spin_unlock(&rq->lock); 922 raw_spin_unlock(&rq->lock);
922#endif 923#endif
923} 924}
924 925
@@ -948,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
948{ 949{
949 for (;;) { 950 for (;;) {
950 struct rq *rq = task_rq(p); 951 struct rq *rq = task_rq(p);
951 spin_lock(&rq->lock); 952 raw_spin_lock(&rq->lock);
952 if (likely(rq == task_rq(p))) 953 if (likely(rq == task_rq(p)))
953 return rq; 954 return rq;
954 spin_unlock(&rq->lock); 955 raw_spin_unlock(&rq->lock);
955 } 956 }
956} 957}
957 958
@@ -968,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
968 for (;;) { 969 for (;;) {
969 local_irq_save(*flags); 970 local_irq_save(*flags);
970 rq = task_rq(p); 971 rq = task_rq(p);
971 spin_lock(&rq->lock); 972 raw_spin_lock(&rq->lock);
972 if (likely(rq == task_rq(p))) 973 if (likely(rq == task_rq(p)))
973 return rq; 974 return rq;
974 spin_unlock_irqrestore(&rq->lock, *flags); 975 raw_spin_unlock_irqrestore(&rq->lock, *flags);
975 } 976 }
976} 977}
977 978
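Both __task_rq_lock() and task_rq_lock() above rely on the same lock-then-revalidate loop: a task's runqueue can change between reading task_rq(p) and taking that runqueue's lock, so the pointer is re-checked under the lock and the acquisition retried if the task moved. A hedged userspace sketch of the idiom (illustrative names, not kernel code):

	#include <pthread.h>
	#include <stdatomic.h>

	struct runqueue { pthread_mutex_t lock; };
	struct task { _Atomic(struct runqueue *) rq; };

	/* Lock the runqueue the task currently belongs to. */
	static struct runqueue *task_rq_lock_like(struct task *t)
	{
		for (;;) {
			struct runqueue *rq = atomic_load(&t->rq);

			pthread_mutex_lock(&rq->lock);
			if (rq == atomic_load(&t->rq))	 /* still the owner? */
				return rq;		 /* stable: return it locked */
			pthread_mutex_unlock(&rq->lock); /* task moved: retry */
		}
	}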
@@ -980,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
980 struct rq *rq = task_rq(p); 981 struct rq *rq = task_rq(p);
981 982
982 smp_mb(); /* spin-unlock-wait is not a full memory barrier */ 983 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
983 spin_unlock_wait(&rq->lock); 984 raw_spin_unlock_wait(&rq->lock);
984} 985}
985 986
986static void __task_rq_unlock(struct rq *rq) 987static void __task_rq_unlock(struct rq *rq)
987 __releases(rq->lock) 988 __releases(rq->lock)
988{ 989{
989 spin_unlock(&rq->lock); 990 raw_spin_unlock(&rq->lock);
990} 991}
991 992
992static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) 993static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
993 __releases(rq->lock) 994 __releases(rq->lock)
994{ 995{
995 spin_unlock_irqrestore(&rq->lock, *flags); 996 raw_spin_unlock_irqrestore(&rq->lock, *flags);
996} 997}
997 998
998/* 999/*
@@ -1005,7 +1006,7 @@ static struct rq *this_rq_lock(void)
1005 1006
1006 local_irq_disable(); 1007 local_irq_disable();
1007 rq = this_rq(); 1008 rq = this_rq();
1008 spin_lock(&rq->lock); 1009 raw_spin_lock(&rq->lock);
1009 1010
1010 return rq; 1011 return rq;
1011} 1012}
@@ -1052,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
1052 1053
1053 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 1054 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1054 1055
1055 spin_lock(&rq->lock); 1056 raw_spin_lock(&rq->lock);
1056 update_rq_clock(rq); 1057 update_rq_clock(rq);
1057 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 1058 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1058 spin_unlock(&rq->lock); 1059 raw_spin_unlock(&rq->lock);
1059 1060
1060 return HRTIMER_NORESTART; 1061 return HRTIMER_NORESTART;
1061} 1062}
@@ -1068,10 +1069,10 @@ static void __hrtick_start(void *arg)
1068{ 1069{
1069 struct rq *rq = arg; 1070 struct rq *rq = arg;
1070 1071
1071 spin_lock(&rq->lock); 1072 raw_spin_lock(&rq->lock);
1072 hrtimer_restart(&rq->hrtick_timer); 1073 hrtimer_restart(&rq->hrtick_timer);
1073 rq->hrtick_csd_pending = 0; 1074 rq->hrtick_csd_pending = 0;
1074 spin_unlock(&rq->lock); 1075 raw_spin_unlock(&rq->lock);
1075} 1076}
1076 1077
1077/* 1078/*
@@ -1178,7 +1179,7 @@ static void resched_task(struct task_struct *p)
1178{ 1179{
1179 int cpu; 1180 int cpu;
1180 1181
1181 assert_spin_locked(&task_rq(p)->lock); 1182 assert_raw_spin_locked(&task_rq(p)->lock);
1182 1183
1183 if (test_tsk_need_resched(p)) 1184 if (test_tsk_need_resched(p))
1184 return; 1185 return;
@@ -1200,10 +1201,10 @@ static void resched_cpu(int cpu)
1200 struct rq *rq = cpu_rq(cpu); 1201 struct rq *rq = cpu_rq(cpu);
1201 unsigned long flags; 1202 unsigned long flags;
1202 1203
1203 if (!spin_trylock_irqsave(&rq->lock, flags)) 1204 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
1204 return; 1205 return;
1205 resched_task(cpu_curr(cpu)); 1206 resched_task(cpu_curr(cpu));
1206 spin_unlock_irqrestore(&rq->lock, flags); 1207 raw_spin_unlock_irqrestore(&rq->lock, flags);
1207} 1208}
1208 1209
1209#ifdef CONFIG_NO_HZ 1210#ifdef CONFIG_NO_HZ
@@ -1272,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1272#else /* !CONFIG_SMP */ 1273#else /* !CONFIG_SMP */
1273static void resched_task(struct task_struct *p) 1274static void resched_task(struct task_struct *p)
1274{ 1275{
1275 assert_spin_locked(&task_rq(p)->lock); 1276 assert_raw_spin_locked(&task_rq(p)->lock);
1276 set_tsk_need_resched(p); 1277 set_tsk_need_resched(p);
1277} 1278}
1278 1279
@@ -1599,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
1599 struct rq *rq = cpu_rq(cpu); 1600 struct rq *rq = cpu_rq(cpu);
1600 unsigned long flags; 1601 unsigned long flags;
1601 1602
1602 spin_lock_irqsave(&rq->lock, flags); 1603 raw_spin_lock_irqsave(&rq->lock, flags);
1603 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; 1604 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
1604 tg->cfs_rq[cpu]->shares = boost ? 0 : shares; 1605 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
1605 __set_se_shares(tg->se[cpu], shares); 1606 __set_se_shares(tg->se[cpu], shares);
1606 spin_unlock_irqrestore(&rq->lock, flags); 1607 raw_spin_unlock_irqrestore(&rq->lock, flags);
1607 } 1608 }
1608} 1609}
1609 1610
@@ -1614,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
1614 */ 1615 */
1615static int tg_shares_up(struct task_group *tg, void *data) 1616static int tg_shares_up(struct task_group *tg, void *data)
1616{ 1617{
1617 unsigned long weight, rq_weight = 0, shares = 0; 1618 unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
1618 unsigned long *usd_rq_weight; 1619 unsigned long *usd_rq_weight;
1619 struct sched_domain *sd = data; 1620 struct sched_domain *sd = data;
1620 unsigned long flags; 1621 unsigned long flags;
@@ -1630,6 +1631,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
1630 weight = tg->cfs_rq[i]->load.weight; 1631 weight = tg->cfs_rq[i]->load.weight;
1631 usd_rq_weight[i] = weight; 1632 usd_rq_weight[i] = weight;
1632 1633
1634 rq_weight += weight;
1633 /* 1635 /*
1634 * If there are currently no tasks on the cpu pretend there 1636 * If there are currently no tasks on the cpu pretend there
1635 * is one of average load so that when a new task gets to 1637 * is one of average load so that when a new task gets to
@@ -1638,10 +1640,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
1638 if (!weight) 1640 if (!weight)
1639 weight = NICE_0_LOAD; 1641 weight = NICE_0_LOAD;
1640 1642
1641 rq_weight += weight; 1643 sum_weight += weight;
1642 shares += tg->cfs_rq[i]->shares; 1644 shares += tg->cfs_rq[i]->shares;
1643 } 1645 }
1644 1646
1647 if (!rq_weight)
1648 rq_weight = sum_weight;
1649
1645 if ((!shares && rq_weight) || shares > tg->shares) 1650 if ((!shares && rq_weight) || shares > tg->shares)
1646 shares = tg->shares; 1651 shares = tg->shares;
1647 1652
@@ -1701,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1701 if (root_task_group_empty()) 1706 if (root_task_group_empty())
1702 return; 1707 return;
1703 1708
1704 spin_unlock(&rq->lock); 1709 raw_spin_unlock(&rq->lock);
1705 update_shares(sd); 1710 update_shares(sd);
1706 spin_lock(&rq->lock); 1711 raw_spin_lock(&rq->lock);
1707} 1712}
1708 1713
1709static void update_h_load(long cpu) 1714static void update_h_load(long cpu)
@@ -1743,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1743 __acquires(busiest->lock) 1748 __acquires(busiest->lock)
1744 __acquires(this_rq->lock) 1749 __acquires(this_rq->lock)
1745{ 1750{
1746 spin_unlock(&this_rq->lock); 1751 raw_spin_unlock(&this_rq->lock);
1747 double_rq_lock(this_rq, busiest); 1752 double_rq_lock(this_rq, busiest);
1748 1753
1749 return 1; 1754 return 1;
@@ -1764,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1764{ 1769{
1765 int ret = 0; 1770 int ret = 0;
1766 1771
1767 if (unlikely(!spin_trylock(&busiest->lock))) { 1772 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1768 if (busiest < this_rq) { 1773 if (busiest < this_rq) {
1769 spin_unlock(&this_rq->lock); 1774 raw_spin_unlock(&this_rq->lock);
1770 spin_lock(&busiest->lock); 1775 raw_spin_lock(&busiest->lock);
1771 spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); 1776 raw_spin_lock_nested(&this_rq->lock,
1777 SINGLE_DEPTH_NESTING);
1772 ret = 1; 1778 ret = 1;
1773 } else 1779 } else
1774 spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); 1780 raw_spin_lock_nested(&busiest->lock,
1781 SINGLE_DEPTH_NESTING);
1775 } 1782 }
1776 return ret; 1783 return ret;
1777} 1784}
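When the trylock above fails, the slow path drops this_rq->lock and re-acquires both locks in a fixed order, lowest address first, so that two CPUs balancing against each other cannot deadlock (ABBA). A hedged sketch of that ordering rule using ordinary mutexes (names are illustrative; the pointer comparison is the same address-ordering trick used above):

	#include <pthread.h>

	struct rq_like { pthread_mutex_t lock; };

	/* Take both locks without risking an ABBA deadlock. */
	static void double_lock(struct rq_like *a, struct rq_like *b)
	{
		if (a == b) {
			pthread_mutex_lock(&a->lock);
		} else if (a < b) {		/* always lower address first */
			pthread_mutex_lock(&a->lock);
			pthread_mutex_lock(&b->lock);
		} else {
			pthread_mutex_lock(&b->lock);
			pthread_mutex_lock(&a->lock);
		}
	}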
@@ -1785,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1785{ 1792{
1786 if (unlikely(!irqs_disabled())) { 1793 if (unlikely(!irqs_disabled())) {
1787 /* printk() doesn't work good under rq->lock */ 1794 /* printk() doesn't work good under rq->lock */
1788 spin_unlock(&this_rq->lock); 1795 raw_spin_unlock(&this_rq->lock);
1789 BUG_ON(1); 1796 BUG_ON(1);
1790 } 1797 }
1791 1798
@@ -1795,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1795static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1802static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1796 __releases(busiest->lock) 1803 __releases(busiest->lock)
1797{ 1804{
1798 spin_unlock(&busiest->lock); 1805 raw_spin_unlock(&busiest->lock);
1799 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 1806 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1800} 1807}
1801#endif 1808#endif
@@ -1810,6 +1817,22 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1810#endif 1817#endif
1811 1818
1812static void calc_load_account_active(struct rq *this_rq); 1819static void calc_load_account_active(struct rq *this_rq);
1820static void update_sysctl(void);
1821static int get_update_sysctl_factor(void);
1822
1823static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1824{
1825 set_task_rq(p, cpu);
1826#ifdef CONFIG_SMP
1827 /*
1828 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1829 * successfuly executed on another CPU. We must ensure that updates of
1830 * per-task data have been completed by this moment.
1831 */
1832 smp_wmb();
1833 task_thread_info(p)->cpu = cpu;
1834#endif
1835}
1813 1836
1814#include "sched_stats.h" 1837#include "sched_stats.h"
1815#include "sched_idletask.c" 1838#include "sched_idletask.c"
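__set_task_cpu(), moved up here so it is visible to code earlier in the file, publishes the new CPU value only after an smp_wmb(), so a CPU that observes the new ->cpu also observes the per-task updates made before it. As a loose analogy only, not the kernel implementation, the same publish-after-update ordering can be written as a C11 release store:

	#include <stdatomic.h>

	struct task_like {
		int per_task_data;	/* stands in for the fields updated before publication */
		_Atomic int cpu;
	};

	static void publish_cpu(struct task_like *t, int new_cpu)
	{
		t->per_task_data = 42;	/* ordinary updates first */
		/* release: readers that see the new cpu also see the update above */
		atomic_store_explicit(&t->cpu, new_cpu, memory_order_release);
	}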
@@ -1967,20 +1990,6 @@ inline int task_curr(const struct task_struct *p)
1967 return cpu_curr(task_cpu(p)) == p; 1990 return cpu_curr(task_cpu(p)) == p;
1968} 1991}
1969 1992
1970static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1971{
1972 set_task_rq(p, cpu);
1973#ifdef CONFIG_SMP
1974 /*
1975 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1976 * successfuly executed on another CPU. We must ensure that updates of
1977 * per-task data have been completed by this moment.
1978 */
1979 smp_wmb();
1980 task_thread_info(p)->cpu = cpu;
1981#endif
1982}
1983
1984static inline void check_class_changed(struct rq *rq, struct task_struct *p, 1993static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1985 const struct sched_class *prev_class, 1994 const struct sched_class *prev_class,
1986 int oldprio, int running) 1995 int oldprio, int running)
@@ -1993,39 +2002,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1993 p->sched_class->prio_changed(rq, p, oldprio, running); 2002 p->sched_class->prio_changed(rq, p, oldprio, running);
1994} 2003}
1995 2004
1996/**
1997 * kthread_bind - bind a just-created kthread to a cpu.
1998 * @p: thread created by kthread_create().
1999 * @cpu: cpu (might not be online, must be possible) for @k to run on.
2000 *
2001 * Description: This function is equivalent to set_cpus_allowed(),
2002 * except that @cpu doesn't need to be online, and the thread must be
2003 * stopped (i.e., just returned from kthread_create()).
2004 *
2005 * Function lives here instead of kthread.c because it messes with
2006 * scheduler internals which require locking.
2007 */
2008void kthread_bind(struct task_struct *p, unsigned int cpu)
2009{
2010 struct rq *rq = cpu_rq(cpu);
2011 unsigned long flags;
2012
2013 /* Must have done schedule() in kthread() before we set_task_cpu */
2014 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
2015 WARN_ON(1);
2016 return;
2017 }
2018
2019 spin_lock_irqsave(&rq->lock, flags);
2020 update_rq_clock(rq);
2021 set_task_cpu(p, cpu);
2022 p->cpus_allowed = cpumask_of_cpu(cpu);
2023 p->rt.nr_cpus_allowed = 1;
2024 p->flags |= PF_THREAD_BOUND;
2025 spin_unlock_irqrestore(&rq->lock, flags);
2026}
2027EXPORT_SYMBOL(kthread_bind);
2028
2029#ifdef CONFIG_SMP 2005#ifdef CONFIG_SMP
2030/* 2006/*
2031 * Is this task likely cache-hot: 2007 * Is this task likely cache-hot:
@@ -2035,6 +2011,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2035{ 2011{
2036 s64 delta; 2012 s64 delta;
2037 2013
2014 if (p->sched_class != &fair_sched_class)
2015 return 0;
2016
2038 /* 2017 /*
2039 * Buddy candidates are cache hot: 2018 * Buddy candidates are cache hot:
2040 */ 2019 */
@@ -2043,9 +2022,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2043 &p->se == cfs_rq_of(&p->se)->last)) 2022 &p->se == cfs_rq_of(&p->se)->last))
2044 return 1; 2023 return 1;
2045 2024
2046 if (p->sched_class != &fair_sched_class)
2047 return 0;
2048
2049 if (sysctl_sched_migration_cost == -1) 2025 if (sysctl_sched_migration_cost == -1)
2050 return 1; 2026 return 1;
2051 if (sysctl_sched_migration_cost == 0) 2027 if (sysctl_sched_migration_cost == 0)
@@ -2056,38 +2032,24 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2056 return delta < (s64)sysctl_sched_migration_cost; 2032 return delta < (s64)sysctl_sched_migration_cost;
2057} 2033}
2058 2034
2059
2060void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 2035void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2061{ 2036{
2062 int old_cpu = task_cpu(p); 2037#ifdef CONFIG_SCHED_DEBUG
2063 struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu); 2038 /*
2064 struct cfs_rq *old_cfsrq = task_cfs_rq(p), 2039 * We should never call set_task_cpu() on a blocked task,
2065 *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); 2040 * ttwu() will sort out the placement.
2066 u64 clock_offset; 2041 */
2067 2042 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2068 clock_offset = old_rq->clock - new_rq->clock; 2043 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2044#endif
2069 2045
2070 trace_sched_migrate_task(p, new_cpu); 2046 trace_sched_migrate_task(p, new_cpu);
2071 2047
2072#ifdef CONFIG_SCHEDSTATS 2048 if (task_cpu(p) == new_cpu)
2073 if (p->se.wait_start) 2049 return;
2074 p->se.wait_start -= clock_offset; 2050
2075 if (p->se.sleep_start) 2051 p->se.nr_migrations++;
2076 p->se.sleep_start -= clock_offset; 2052 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2077 if (p->se.block_start)
2078 p->se.block_start -= clock_offset;
2079#endif
2080 if (old_cpu != new_cpu) {
2081 p->se.nr_migrations++;
2082#ifdef CONFIG_SCHEDSTATS
2083 if (task_hot(p, old_rq->clock, NULL))
2084 schedstat_inc(p, se.nr_forced2_migrations);
2085#endif
2086 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
2087 1, 1, NULL, 0);
2088 }
2089 p->se.vruntime -= old_cfsrq->min_vruntime -
2090 new_cfsrq->min_vruntime;
2091 2053
2092 __set_task_cpu(p, new_cpu); 2054 __set_task_cpu(p, new_cpu);
2093} 2055}
@@ -2112,13 +2074,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2112 2074
2113 /* 2075 /*
2114 * If the task is not on a runqueue (and not running), then 2076 * If the task is not on a runqueue (and not running), then
2115 * it is sufficient to simply update the task's cpu field. 2077 * the next wake-up will properly place the task.
2116 */ 2078 */
2117 if (!p->se.on_rq && !task_running(rq, p)) { 2079 if (!p->se.on_rq && !task_running(rq, p))
2118 update_rq_clock(rq);
2119 set_task_cpu(p, dest_cpu);
2120 return 0; 2080 return 0;
2121 }
2122 2081
2123 init_completion(&req->done); 2082 init_completion(&req->done);
2124 req->task = p; 2083 req->task = p;
@@ -2323,6 +2282,77 @@ void task_oncpu_function_call(struct task_struct *p,
2323 preempt_enable(); 2282 preempt_enable();
2324} 2283}
2325 2284
2285#ifdef CONFIG_SMP
2286static int select_fallback_rq(int cpu, struct task_struct *p)
2287{
2288 int dest_cpu;
2289 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2290
2291 /* Look for allowed, online CPU in same node. */
2292 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2293 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2294 return dest_cpu;
2295
2296 /* Any allowed, online CPU? */
2297 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2298 if (dest_cpu < nr_cpu_ids)
2299 return dest_cpu;
2300
2301 /* No more Mr. Nice Guy. */
2302 if (dest_cpu >= nr_cpu_ids) {
2303 rcu_read_lock();
2304 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
2305 rcu_read_unlock();
2306 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
2307
2308 /*
2309 * Don't tell them about moving exiting tasks or
2310 * kernel threads (both mm NULL), since they never
2311 * leave kernel.
2312 */
2313 if (p->mm && printk_ratelimit()) {
2314 printk(KERN_INFO "process %d (%s) no "
2315 "longer affine to cpu%d\n",
2316 task_pid_nr(p), p->comm, cpu);
2317 }
2318 }
2319
2320 return dest_cpu;
2321}
2322
2323/*
2324 * Called from:
2325 *
2326 * - fork, @p is stable because it isn't on the tasklist yet
2327 *
2328 * - exec, @p is unstable, retry loop
2329 *
2330 * - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
2331 * we should be good.
2332 */
2333static inline
2334int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2335{
2336 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
2337
2338 /*
2339 * In order not to call set_task_cpu() on a blocking task we need
2340 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2341 * cpu.
2342 *
2343 * Since this is common to all placement strategies, this lives here.
2344 *
2345 * [ this allows ->select_task() to simply return task_cpu(p) and
2346 * not worry about this generic constraint ]
2347 */
2348 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
2349 !cpu_online(cpu)))
2350 cpu = select_fallback_rq(task_cpu(p), p);
2351
2352 return cpu;
2353}
2354#endif
2355
2326/*** 2356/***
2327 * try_to_wake_up - wake up a thread 2357 * try_to_wake_up - wake up a thread
2328 * @p: the to-be-woken-up thread 2358 * @p: the to-be-woken-up thread
@@ -2374,17 +2404,18 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2374 if (task_contributes_to_load(p)) 2404 if (task_contributes_to_load(p))
2375 rq->nr_uninterruptible--; 2405 rq->nr_uninterruptible--;
2376 p->state = TASK_WAKING; 2406 p->state = TASK_WAKING;
2377 task_rq_unlock(rq, &flags);
2378 2407
2379 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags); 2408 if (p->sched_class->task_waking)
2380 if (cpu != orig_cpu) { 2409 p->sched_class->task_waking(rq, p);
2381 local_irq_save(flags); 2410
2382 rq = cpu_rq(cpu); 2411 __task_rq_unlock(rq);
2383 update_rq_clock(rq); 2412
2413 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2414 if (cpu != orig_cpu)
2384 set_task_cpu(p, cpu); 2415 set_task_cpu(p, cpu);
2385 local_irq_restore(flags); 2416
2386 } 2417 rq = __task_rq_lock(p);
2387 rq = task_rq_lock(p, &flags); 2418 update_rq_clock(rq);
2388 2419
2389 WARN_ON(p->state != TASK_WAKING); 2420 WARN_ON(p->state != TASK_WAKING);
2390 cpu = task_cpu(p); 2421 cpu = task_cpu(p);
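select_fallback_rq(), added above, walks a preference cascade: an allowed CPU in the same node, then any allowed active CPU, then whatever the task's cpuset will still permit. A hedged toy sketch of that cascade over plain bitmasks (not the kernel cpumask API):

	#include <stdint.h>

	/* Lowest set bit of (allowed & candidates), or -1 if the intersection is empty. */
	static int first_allowed(uint64_t allowed, uint64_t candidates)
	{
		uint64_t m = allowed & candidates;
		return m ? __builtin_ctzll(m) : -1;
	}

	static int fallback_cpu(uint64_t allowed, uint64_t node_cpus, uint64_t active_cpus)
	{
		int cpu;

		cpu = first_allowed(allowed, node_cpus & active_cpus);	/* same node first */
		if (cpu >= 0)
			return cpu;
		cpu = first_allowed(allowed, active_cpus);		/* any allowed, active CPU */
		if (cpu >= 0)
			return cpu;
		/* "No more Mr. Nice Guy": the kernel widens the allowed mask via the cpuset here */
		return first_allowed(~0ULL, active_cpus);
	}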
@@ -2440,8 +2471,8 @@ out_running:
2440 2471
2441 p->state = TASK_RUNNING; 2472 p->state = TASK_RUNNING;
2442#ifdef CONFIG_SMP 2473#ifdef CONFIG_SMP
2443 if (p->sched_class->task_wake_up) 2474 if (p->sched_class->task_woken)
2444 p->sched_class->task_wake_up(rq, p); 2475 p->sched_class->task_woken(rq, p);
2445 2476
2446 if (unlikely(rq->idle_stamp)) { 2477 if (unlikely(rq->idle_stamp)) {
2447 u64 delta = rq->clock - rq->idle_stamp; 2478 u64 delta = rq->clock - rq->idle_stamp;
@@ -2499,7 +2530,6 @@ static void __sched_fork(struct task_struct *p)
2499 p->se.avg_overlap = 0; 2530 p->se.avg_overlap = 0;
2500 p->se.start_runtime = 0; 2531 p->se.start_runtime = 0;
2501 p->se.avg_wakeup = sysctl_sched_wakeup_granularity; 2532 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2502 p->se.avg_running = 0;
2503 2533
2504#ifdef CONFIG_SCHEDSTATS 2534#ifdef CONFIG_SCHEDSTATS
2505 p->se.wait_start = 0; 2535 p->se.wait_start = 0;
@@ -2521,7 +2551,6 @@ static void __sched_fork(struct task_struct *p)
2521 p->se.nr_failed_migrations_running = 0; 2551 p->se.nr_failed_migrations_running = 0;
2522 p->se.nr_failed_migrations_hot = 0; 2552 p->se.nr_failed_migrations_hot = 0;
2523 p->se.nr_forced_migrations = 0; 2553 p->se.nr_forced_migrations = 0;
2524 p->se.nr_forced2_migrations = 0;
2525 2554
2526 p->se.nr_wakeups = 0; 2555 p->se.nr_wakeups = 0;
2527 p->se.nr_wakeups_sync = 0; 2556 p->se.nr_wakeups_sync = 0;
@@ -2542,14 +2571,6 @@ static void __sched_fork(struct task_struct *p)
2542#ifdef CONFIG_PREEMPT_NOTIFIERS 2571#ifdef CONFIG_PREEMPT_NOTIFIERS
2543 INIT_HLIST_HEAD(&p->preempt_notifiers); 2572 INIT_HLIST_HEAD(&p->preempt_notifiers);
2544#endif 2573#endif
2545
2546 /*
2547 * We mark the process as running here, but have not actually
2548 * inserted it onto the runqueue yet. This guarantees that
2549 * nobody will actually run it, and a signal or other external
2550 * event cannot wake it up and insert it on the runqueue either.
2551 */
2552 p->state = TASK_RUNNING;
2553} 2574}
2554 2575
2555/* 2576/*
@@ -2558,9 +2579,14 @@ static void __sched_fork(struct task_struct *p)
2558void sched_fork(struct task_struct *p, int clone_flags) 2579void sched_fork(struct task_struct *p, int clone_flags)
2559{ 2580{
2560 int cpu = get_cpu(); 2581 int cpu = get_cpu();
2561 unsigned long flags;
2562 2582
2563 __sched_fork(p); 2583 __sched_fork(p);
2584 /*
2585 * We mark the process as waking here. This guarantees that
2586 * nobody will actually run it, and a signal or other external
2587 * event cannot wake it up and insert it on the runqueue either.
2588 */
2589 p->state = TASK_WAKING;
2564 2590
2565 /* 2591 /*
2566 * Revert to default priority/policy on fork if requested. 2592 * Revert to default priority/policy on fork if requested.
@@ -2592,13 +2618,13 @@ void sched_fork(struct task_struct *p, int clone_flags)
2592 if (!rt_prio(p->prio)) 2618 if (!rt_prio(p->prio))
2593 p->sched_class = &fair_sched_class; 2619 p->sched_class = &fair_sched_class;
2594 2620
2621 if (p->sched_class->task_fork)
2622 p->sched_class->task_fork(p);
2623
2595#ifdef CONFIG_SMP 2624#ifdef CONFIG_SMP
2596 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0); 2625 cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
2597#endif 2626#endif
2598 local_irq_save(flags);
2599 update_rq_clock(cpu_rq(cpu));
2600 set_task_cpu(p, cpu); 2627 set_task_cpu(p, cpu);
2601 local_irq_restore(flags);
2602 2628
2603#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 2629#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2604 if (likely(sched_info_on())) 2630 if (likely(sched_info_on()))
@@ -2629,24 +2655,15 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2629 struct rq *rq; 2655 struct rq *rq;
2630 2656
2631 rq = task_rq_lock(p, &flags); 2657 rq = task_rq_lock(p, &flags);
2632 BUG_ON(p->state != TASK_RUNNING); 2658 BUG_ON(p->state != TASK_WAKING);
2659 p->state = TASK_RUNNING;
2633 update_rq_clock(rq); 2660 update_rq_clock(rq);
2634 2661 activate_task(rq, p, 0);
2635 if (!p->sched_class->task_new || !current->se.on_rq) {
2636 activate_task(rq, p, 0);
2637 } else {
2638 /*
2639 * Let the scheduling class do new task startup
2640 * management (if any):
2641 */
2642 p->sched_class->task_new(rq, p);
2643 inc_nr_running(rq);
2644 }
2645 trace_sched_wakeup_new(rq, p, 1); 2662 trace_sched_wakeup_new(rq, p, 1);
2646 check_preempt_curr(rq, p, WF_FORK); 2663 check_preempt_curr(rq, p, WF_FORK);
2647#ifdef CONFIG_SMP 2664#ifdef CONFIG_SMP
2648 if (p->sched_class->task_wake_up) 2665 if (p->sched_class->task_woken)
2649 p->sched_class->task_wake_up(rq, p); 2666 p->sched_class->task_woken(rq, p);
2650#endif 2667#endif
2651 task_rq_unlock(rq, &flags); 2668 task_rq_unlock(rq, &flags);
2652} 2669}
@@ -2798,10 +2815,10 @@ static inline void post_schedule(struct rq *rq)
2798 if (rq->post_schedule) { 2815 if (rq->post_schedule) {
2799 unsigned long flags; 2816 unsigned long flags;
2800 2817
2801 spin_lock_irqsave(&rq->lock, flags); 2818 raw_spin_lock_irqsave(&rq->lock, flags);
2802 if (rq->curr->sched_class->post_schedule) 2819 if (rq->curr->sched_class->post_schedule)
2803 rq->curr->sched_class->post_schedule(rq); 2820 rq->curr->sched_class->post_schedule(rq);
2804 spin_unlock_irqrestore(&rq->lock, flags); 2821 raw_spin_unlock_irqrestore(&rq->lock, flags);
2805 2822
2806 rq->post_schedule = 0; 2823 rq->post_schedule = 0;
2807 } 2824 }
@@ -3083,15 +3100,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
3083{ 3100{
3084 BUG_ON(!irqs_disabled()); 3101 BUG_ON(!irqs_disabled());
3085 if (rq1 == rq2) { 3102 if (rq1 == rq2) {
3086 spin_lock(&rq1->lock); 3103 raw_spin_lock(&rq1->lock);
3087 __acquire(rq2->lock); /* Fake it out ;) */ 3104 __acquire(rq2->lock); /* Fake it out ;) */
3088 } else { 3105 } else {
3089 if (rq1 < rq2) { 3106 if (rq1 < rq2) {
3090 spin_lock(&rq1->lock); 3107 raw_spin_lock(&rq1->lock);
3091 spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 3108 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
3092 } else { 3109 } else {
3093 spin_lock(&rq2->lock); 3110 raw_spin_lock(&rq2->lock);
3094 spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 3111 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
3095 } 3112 }
3096 } 3113 }
3097 update_rq_clock(rq1); 3114 update_rq_clock(rq1);
@@ -3108,29 +3125,44 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
3108 __releases(rq1->lock) 3125 __releases(rq1->lock)
3109 __releases(rq2->lock) 3126 __releases(rq2->lock)
3110{ 3127{
3111 spin_unlock(&rq1->lock); 3128 raw_spin_unlock(&rq1->lock);
3112 if (rq1 != rq2) 3129 if (rq1 != rq2)
3113 spin_unlock(&rq2->lock); 3130 raw_spin_unlock(&rq2->lock);
3114 else 3131 else
3115 __release(rq2->lock); 3132 __release(rq2->lock);
3116} 3133}
3117 3134
3118/* 3135/*
3119 * If dest_cpu is allowed for this process, migrate the task to it. 3136 * sched_exec - execve() is a valuable balancing opportunity, because at
3120 * This is accomplished by forcing the cpu_allowed mask to only 3137 * this point the task has the smallest effective memory and cache footprint.
3121 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
3122 * the cpu_allowed mask is restored.
3123 */ 3138 */
3124static void sched_migrate_task(struct task_struct *p, int dest_cpu) 3139void sched_exec(void)
3125{ 3140{
3141 struct task_struct *p = current;
3126 struct migration_req req; 3142 struct migration_req req;
3143 int dest_cpu, this_cpu;
3127 unsigned long flags; 3144 unsigned long flags;
3128 struct rq *rq; 3145 struct rq *rq;
3129 3146
3147again:
3148 this_cpu = get_cpu();
3149 dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
3150 if (dest_cpu == this_cpu) {
3151 put_cpu();
3152 return;
3153 }
3154
3130 rq = task_rq_lock(p, &flags); 3155 rq = task_rq_lock(p, &flags);
3156 put_cpu();
3157
3158 /*
3159 * select_task_rq() can race against ->cpus_allowed
3160 */
3131 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) 3161 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
3132 || unlikely(!cpu_active(dest_cpu))) 3162 || unlikely(!cpu_active(dest_cpu))) {
3133 goto out; 3163 task_rq_unlock(rq, &flags);
3164 goto again;
3165 }
3134 3166
3135 /* force the process onto the specified CPU */ 3167 /* force the process onto the specified CPU */
3136 if (migrate_task(p, dest_cpu, &req)) { 3168 if (migrate_task(p, dest_cpu, &req)) {
@@ -3145,24 +3177,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
3145 3177
3146 return; 3178 return;
3147 } 3179 }
3148out:
3149 task_rq_unlock(rq, &flags); 3180 task_rq_unlock(rq, &flags);
3150} 3181}
3151 3182
3152/* 3183/*
3153 * sched_exec - execve() is a valuable balancing opportunity, because at
3154 * this point the task has the smallest effective memory and cache footprint.
3155 */
3156void sched_exec(void)
3157{
3158 int new_cpu, this_cpu = get_cpu();
3159 new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
3160 put_cpu();
3161 if (new_cpu != this_cpu)
3162 sched_migrate_task(current, new_cpu);
3163}
3164
3165/*
3166 * pull_task - move a task from a remote runqueue to the local runqueue. 3184 * pull_task - move a task from a remote runqueue to the local runqueue.
3167 * Both runqueues must be locked. 3185 * Both runqueues must be locked.
3168 */ 3186 */
@@ -3172,10 +3190,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
3172 deactivate_task(src_rq, p, 0); 3190 deactivate_task(src_rq, p, 0);
3173 set_task_cpu(p, this_cpu); 3191 set_task_cpu(p, this_cpu);
3174 activate_task(this_rq, p, 0); 3192 activate_task(this_rq, p, 0);
3175 /*
3176 * Note that idle threads have a prio of MAX_PRIO, for this test
3177 * to be always true for them.
3178 */
3179 check_preempt_curr(this_rq, p, 0); 3193 check_preempt_curr(this_rq, p, 0);
3180} 3194}
3181 3195
@@ -4134,7 +4148,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
4134 unsigned long flags; 4148 unsigned long flags;
4135 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); 4149 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4136 4150
4137 cpumask_copy(cpus, cpu_online_mask); 4151 cpumask_copy(cpus, cpu_active_mask);
4138 4152
4139 /* 4153 /*
4140 * When power savings policy is enabled for the parent domain, idle 4154 * When power savings policy is enabled for the parent domain, idle
@@ -4207,14 +4221,15 @@ redo:
4207 4221
4208 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { 4222 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
4209 4223
4210 spin_lock_irqsave(&busiest->lock, flags); 4224 raw_spin_lock_irqsave(&busiest->lock, flags);
4211 4225
4212 /* don't kick the migration_thread, if the curr 4226 /* don't kick the migration_thread, if the curr
4213 * task on busiest cpu can't be moved to this_cpu 4227 * task on busiest cpu can't be moved to this_cpu
4214 */ 4228 */
4215 if (!cpumask_test_cpu(this_cpu, 4229 if (!cpumask_test_cpu(this_cpu,
4216 &busiest->curr->cpus_allowed)) { 4230 &busiest->curr->cpus_allowed)) {
4217 spin_unlock_irqrestore(&busiest->lock, flags); 4231 raw_spin_unlock_irqrestore(&busiest->lock,
4232 flags);
4218 all_pinned = 1; 4233 all_pinned = 1;
4219 goto out_one_pinned; 4234 goto out_one_pinned;
4220 } 4235 }
@@ -4224,7 +4239,7 @@ redo:
4224 busiest->push_cpu = this_cpu; 4239 busiest->push_cpu = this_cpu;
4225 active_balance = 1; 4240 active_balance = 1;
4226 } 4241 }
4227 spin_unlock_irqrestore(&busiest->lock, flags); 4242 raw_spin_unlock_irqrestore(&busiest->lock, flags);
4228 if (active_balance) 4243 if (active_balance)
4229 wake_up_process(busiest->migration_thread); 4244 wake_up_process(busiest->migration_thread);
4230 4245
@@ -4297,7 +4312,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
4297 int all_pinned = 0; 4312 int all_pinned = 0;
4298 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); 4313 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4299 4314
4300 cpumask_copy(cpus, cpu_online_mask); 4315 cpumask_copy(cpus, cpu_active_mask);
4301 4316
4302 /* 4317 /*
4303 * When power savings policy is enabled for the parent domain, idle 4318 * When power savings policy is enabled for the parent domain, idle
@@ -4406,10 +4421,10 @@ redo:
4406 /* 4421 /*
4407 * Should not call ttwu while holding a rq->lock 4422 * Should not call ttwu while holding a rq->lock
4408 */ 4423 */
4409 spin_unlock(&this_rq->lock); 4424 raw_spin_unlock(&this_rq->lock);
4410 if (active_balance) 4425 if (active_balance)
4411 wake_up_process(busiest->migration_thread); 4426 wake_up_process(busiest->migration_thread);
4412 spin_lock(&this_rq->lock); 4427 raw_spin_lock(&this_rq->lock);
4413 4428
4414 } else 4429 } else
4415 sd->nr_balance_failed = 0; 4430 sd->nr_balance_failed = 0;
@@ -4694,7 +4709,7 @@ int select_nohz_load_balancer(int stop_tick)
4694 cpumask_set_cpu(cpu, nohz.cpu_mask); 4709 cpumask_set_cpu(cpu, nohz.cpu_mask);
4695 4710
4696 /* time for ilb owner also to sleep */ 4711 /* time for ilb owner also to sleep */
4697 if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { 4712 if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
4698 if (atomic_read(&nohz.load_balancer) == cpu) 4713 if (atomic_read(&nohz.load_balancer) == cpu)
4699 atomic_set(&nohz.load_balancer, -1); 4714 atomic_set(&nohz.load_balancer, -1);
4700 return 0; 4715 return 0;
@@ -5278,11 +5293,11 @@ void scheduler_tick(void)
5278 5293
5279 sched_clock_tick(); 5294 sched_clock_tick();
5280 5295
5281 spin_lock(&rq->lock); 5296 raw_spin_lock(&rq->lock);
5282 update_rq_clock(rq); 5297 update_rq_clock(rq);
5283 update_cpu_load(rq); 5298 update_cpu_load(rq);
5284 curr->sched_class->task_tick(rq, curr, 0); 5299 curr->sched_class->task_tick(rq, curr, 0);
5285 spin_unlock(&rq->lock); 5300 raw_spin_unlock(&rq->lock);
5286 5301
5287 perf_event_task_tick(curr, cpu); 5302 perf_event_task_tick(curr, cpu);
5288 5303
@@ -5396,13 +5411,14 @@ static inline void schedule_debug(struct task_struct *prev)
5396#endif 5411#endif
5397} 5412}
5398 5413
5399static void put_prev_task(struct rq *rq, struct task_struct *p) 5414static void put_prev_task(struct rq *rq, struct task_struct *prev)
5400{ 5415{
5401 u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime; 5416 if (prev->state == TASK_RUNNING) {
5417 u64 runtime = prev->se.sum_exec_runtime;
5402 5418
5403 update_avg(&p->se.avg_running, runtime); 5419 runtime -= prev->se.prev_sum_exec_runtime;
5420 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
5404 5421
5405 if (p->state == TASK_RUNNING) {
5406 /* 5422 /*
5407 * In order to avoid avg_overlap growing stale when we are 5423 * In order to avoid avg_overlap growing stale when we are
5408 * indeed overlapping and hence not getting put to sleep, grow 5424 * indeed overlapping and hence not getting put to sleep, grow
@@ -5412,12 +5428,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
5412 * correlates to the amount of cache footprint a task can 5428 * correlates to the amount of cache footprint a task can
5413 * build up. 5429 * build up.
5414 */ 5430 */
5415 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); 5431 update_avg(&prev->se.avg_overlap, runtime);
5416 update_avg(&p->se.avg_overlap, runtime);
5417 } else {
5418 update_avg(&p->se.avg_running, 0);
5419 } 5432 }
5420 p->sched_class->put_prev_task(rq, p); 5433 prev->sched_class->put_prev_task(rq, prev);
5421} 5434}
5422 5435
5423/* 5436/*
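The rewritten put_prev_task() clamps the runtime just consumed by a still-running task to twice sysctl_sched_migration_cost and feeds it into update_avg() to keep avg_overlap from growing stale. As a hedged restatement from memory (the exact weight is an assumption, not shown in this hunk), update_avg() is an exponentially weighted moving average of roughly this shape:

	#include <stdint.h>

	/* EWMA that moves 1/8 of the way toward each new sample. */
	static void update_avg_like(uint64_t *avg, uint64_t sample)
	{
		int64_t diff = (int64_t)(sample - *avg);

		*avg += diff >> 3;	/* arithmetic shift, as in the kernel helper */
	}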
@@ -5478,7 +5491,7 @@ need_resched_nonpreemptible:
5478 if (sched_feat(HRTICK)) 5491 if (sched_feat(HRTICK))
5479 hrtick_clear(rq); 5492 hrtick_clear(rq);
5480 5493
5481 spin_lock_irq(&rq->lock); 5494 raw_spin_lock_irq(&rq->lock);
5482 update_rq_clock(rq); 5495 update_rq_clock(rq);
5483 clear_tsk_need_resched(prev); 5496 clear_tsk_need_resched(prev);
5484 5497
@@ -5514,7 +5527,7 @@ need_resched_nonpreemptible:
5514 cpu = smp_processor_id(); 5527 cpu = smp_processor_id();
5515 rq = cpu_rq(cpu); 5528 rq = cpu_rq(cpu);
5516 } else 5529 } else
5517 spin_unlock_irq(&rq->lock); 5530 raw_spin_unlock_irq(&rq->lock);
5518 5531
5519 post_schedule(rq); 5532 post_schedule(rq);
5520 5533
@@ -5931,14 +5944,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
5931 */ 5944 */
5932bool try_wait_for_completion(struct completion *x) 5945bool try_wait_for_completion(struct completion *x)
5933{ 5946{
5947 unsigned long flags;
5934 int ret = 1; 5948 int ret = 1;
5935 5949
5936 spin_lock_irq(&x->wait.lock); 5950 spin_lock_irqsave(&x->wait.lock, flags);
5937 if (!x->done) 5951 if (!x->done)
5938 ret = 0; 5952 ret = 0;
5939 else 5953 else
5940 x->done--; 5954 x->done--;
5941 spin_unlock_irq(&x->wait.lock); 5955 spin_unlock_irqrestore(&x->wait.lock, flags);
5942 return ret; 5956 return ret;
5943} 5957}
5944EXPORT_SYMBOL(try_wait_for_completion); 5958EXPORT_SYMBOL(try_wait_for_completion);
@@ -5953,12 +5967,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
5953 */ 5967 */
5954bool completion_done(struct completion *x) 5968bool completion_done(struct completion *x)
5955{ 5969{
5970 unsigned long flags;
5956 int ret = 1; 5971 int ret = 1;
5957 5972
5958 spin_lock_irq(&x->wait.lock); 5973 spin_lock_irqsave(&x->wait.lock, flags);
5959 if (!x->done) 5974 if (!x->done)
5960 ret = 0; 5975 ret = 0;
5961 spin_unlock_irq(&x->wait.lock); 5976 spin_unlock_irqrestore(&x->wait.lock, flags);
5962 return ret; 5977 return ret;
5963} 5978}
5964EXPORT_SYMBOL(completion_done); 5979EXPORT_SYMBOL(completion_done);
@@ -6343,7 +6358,7 @@ recheck:
6343 * make sure no PI-waiters arrive (or leave) while we are 6358 * make sure no PI-waiters arrive (or leave) while we are
6344 * changing the priority of the task: 6359 * changing the priority of the task:
6345 */ 6360 */
6346 spin_lock_irqsave(&p->pi_lock, flags); 6361 raw_spin_lock_irqsave(&p->pi_lock, flags);
6347 /* 6362 /*
6348 * To be able to change p->policy safely, the apropriate 6363 * To be able to change p->policy safely, the apropriate
6349 * runqueue lock must be held. 6364 * runqueue lock must be held.
@@ -6353,7 +6368,7 @@ recheck:
6353 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 6368 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
6354 policy = oldpolicy = -1; 6369 policy = oldpolicy = -1;
6355 __task_rq_unlock(rq); 6370 __task_rq_unlock(rq);
6356 spin_unlock_irqrestore(&p->pi_lock, flags); 6371 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6357 goto recheck; 6372 goto recheck;
6358 } 6373 }
6359 update_rq_clock(rq); 6374 update_rq_clock(rq);
@@ -6377,7 +6392,7 @@ recheck:
6377 check_class_changed(rq, p, prev_class, oldprio, running); 6392 check_class_changed(rq, p, prev_class, oldprio, running);
6378 } 6393 }
6379 __task_rq_unlock(rq); 6394 __task_rq_unlock(rq);
6380 spin_unlock_irqrestore(&p->pi_lock, flags); 6395 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6381 6396
6382 rt_mutex_adjust_pi(p); 6397 rt_mutex_adjust_pi(p);
6383 6398
@@ -6477,7 +6492,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6477 return -EINVAL; 6492 return -EINVAL;
6478 6493
6479 retval = -ESRCH; 6494 retval = -ESRCH;
6480 read_lock(&tasklist_lock); 6495 rcu_read_lock();
6481 p = find_process_by_pid(pid); 6496 p = find_process_by_pid(pid);
6482 if (p) { 6497 if (p) {
6483 retval = security_task_getscheduler(p); 6498 retval = security_task_getscheduler(p);
@@ -6485,7 +6500,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6485 retval = p->policy 6500 retval = p->policy
6486 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 6501 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
6487 } 6502 }
6488 read_unlock(&tasklist_lock); 6503 rcu_read_unlock();
6489 return retval; 6504 return retval;
6490} 6505}
6491 6506
@@ -6503,7 +6518,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6503 if (!param || pid < 0) 6518 if (!param || pid < 0)
6504 return -EINVAL; 6519 return -EINVAL;
6505 6520
6506 read_lock(&tasklist_lock); 6521 rcu_read_lock();
6507 p = find_process_by_pid(pid); 6522 p = find_process_by_pid(pid);
6508 retval = -ESRCH; 6523 retval = -ESRCH;
6509 if (!p) 6524 if (!p)
@@ -6514,7 +6529,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6514 goto out_unlock; 6529 goto out_unlock;
6515 6530
6516 lp.sched_priority = p->rt_priority; 6531 lp.sched_priority = p->rt_priority;
6517 read_unlock(&tasklist_lock); 6532 rcu_read_unlock();
6518 6533
6519 /* 6534 /*
6520 * This one might sleep, we cannot do it with a spinlock held ... 6535 * This one might sleep, we cannot do it with a spinlock held ...
@@ -6524,7 +6539,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6524 return retval; 6539 return retval;
6525 6540
6526out_unlock: 6541out_unlock:
6527 read_unlock(&tasklist_lock); 6542 rcu_read_unlock();
6528 return retval; 6543 return retval;
6529} 6544}
6530 6545
@@ -6535,22 +6550,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
6535 int retval; 6550 int retval;
6536 6551
6537 get_online_cpus(); 6552 get_online_cpus();
6538 read_lock(&tasklist_lock); 6553 rcu_read_lock();
6539 6554
6540 p = find_process_by_pid(pid); 6555 p = find_process_by_pid(pid);
6541 if (!p) { 6556 if (!p) {
6542 read_unlock(&tasklist_lock); 6557 rcu_read_unlock();
6543 put_online_cpus(); 6558 put_online_cpus();
6544 return -ESRCH; 6559 return -ESRCH;
6545 } 6560 }
6546 6561
6547 /* 6562 /* Prevent p going away */
6548 * It is not safe to call set_cpus_allowed with the
6549 * tasklist_lock held. We will bump the task_struct's
6550 * usage count and then drop tasklist_lock.
6551 */
6552 get_task_struct(p); 6563 get_task_struct(p);
6553 read_unlock(&tasklist_lock); 6564 rcu_read_unlock();
6554 6565
6555 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 6566 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
6556 retval = -ENOMEM; 6567 retval = -ENOMEM;
@@ -6631,10 +6642,12 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
6631long sched_getaffinity(pid_t pid, struct cpumask *mask) 6642long sched_getaffinity(pid_t pid, struct cpumask *mask)
6632{ 6643{
6633 struct task_struct *p; 6644 struct task_struct *p;
6645 unsigned long flags;
6646 struct rq *rq;
6634 int retval; 6647 int retval;
6635 6648
6636 get_online_cpus(); 6649 get_online_cpus();
6637 read_lock(&tasklist_lock); 6650 rcu_read_lock();
6638 6651
6639 retval = -ESRCH; 6652 retval = -ESRCH;
6640 p = find_process_by_pid(pid); 6653 p = find_process_by_pid(pid);
@@ -6645,10 +6658,12 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
6645 if (retval) 6658 if (retval)
6646 goto out_unlock; 6659 goto out_unlock;
6647 6660
6661 rq = task_rq_lock(p, &flags);
6648 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); 6662 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
6663 task_rq_unlock(rq, &flags);
6649 6664
6650out_unlock: 6665out_unlock:
6651 read_unlock(&tasklist_lock); 6666 rcu_read_unlock();
6652 put_online_cpus(); 6667 put_online_cpus();
6653 6668
6654 return retval; 6669 return retval;
@@ -6703,7 +6718,7 @@ SYSCALL_DEFINE0(sched_yield)
6703 */ 6718 */
6704 __release(rq->lock); 6719 __release(rq->lock);
6705 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 6720 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
6706 _raw_spin_unlock(&rq->lock); 6721 do_raw_spin_unlock(&rq->lock);
6707 preempt_enable_no_resched(); 6722 preempt_enable_no_resched();
6708 6723
6709 schedule(); 6724 schedule();
@@ -6883,6 +6898,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6883{ 6898{
6884 struct task_struct *p; 6899 struct task_struct *p;
6885 unsigned int time_slice; 6900 unsigned int time_slice;
6901 unsigned long flags;
6902 struct rq *rq;
6886 int retval; 6903 int retval;
6887 struct timespec t; 6904 struct timespec t;
6888 6905
@@ -6890,7 +6907,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6890 return -EINVAL; 6907 return -EINVAL;
6891 6908
6892 retval = -ESRCH; 6909 retval = -ESRCH;
6893 read_lock(&tasklist_lock); 6910 rcu_read_lock();
6894 p = find_process_by_pid(pid); 6911 p = find_process_by_pid(pid);
6895 if (!p) 6912 if (!p)
6896 goto out_unlock; 6913 goto out_unlock;
@@ -6899,15 +6916,17 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6899 if (retval) 6916 if (retval)
6900 goto out_unlock; 6917 goto out_unlock;
6901 6918
6902 time_slice = p->sched_class->get_rr_interval(p); 6919 rq = task_rq_lock(p, &flags);
6920 time_slice = p->sched_class->get_rr_interval(rq, p);
6921 task_rq_unlock(rq, &flags);
6903 6922
6904 read_unlock(&tasklist_lock); 6923 rcu_read_unlock();
6905 jiffies_to_timespec(time_slice, &t); 6924 jiffies_to_timespec(time_slice, &t);
6906 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 6925 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
6907 return retval; 6926 return retval;
6908 6927
6909out_unlock: 6928out_unlock:
6910 read_unlock(&tasklist_lock); 6929 rcu_read_unlock();
6911 return retval; 6930 return retval;
6912} 6931}
6913 6932
@@ -6995,12 +7014,12 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6995 struct rq *rq = cpu_rq(cpu); 7014 struct rq *rq = cpu_rq(cpu);
6996 unsigned long flags; 7015 unsigned long flags;
6997 7016
6998 spin_lock_irqsave(&rq->lock, flags); 7017 raw_spin_lock_irqsave(&rq->lock, flags);
6999 7018
7000 __sched_fork(idle); 7019 __sched_fork(idle);
7020 idle->state = TASK_RUNNING;
7001 idle->se.exec_start = sched_clock(); 7021 idle->se.exec_start = sched_clock();
7002 7022
7003 idle->prio = idle->normal_prio = MAX_PRIO;
7004 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); 7023 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
7005 __set_task_cpu(idle, cpu); 7024 __set_task_cpu(idle, cpu);
7006 7025
@@ -7008,7 +7027,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
7008#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 7027#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
7009 idle->oncpu = 1; 7028 idle->oncpu = 1;
7010#endif 7029#endif
7011 spin_unlock_irqrestore(&rq->lock, flags); 7030 raw_spin_unlock_irqrestore(&rq->lock, flags);
7012 7031
7013 /* Set the preempt count _outside_ the spinlocks! */ 7032 /* Set the preempt count _outside_ the spinlocks! */
7014#if defined(CONFIG_PREEMPT) 7033#if defined(CONFIG_PREEMPT)
@@ -7041,22 +7060,43 @@ cpumask_var_t nohz_cpu_mask;
7041 * 7060 *
7042 * This idea comes from the SD scheduler of Con Kolivas: 7061 * This idea comes from the SD scheduler of Con Kolivas:
7043 */ 7062 */
7044static inline void sched_init_granularity(void) 7063static int get_update_sysctl_factor(void)
7045{ 7064{
7046 unsigned int factor = 1 + ilog2(num_online_cpus()); 7065 unsigned int cpus = min_t(int, num_online_cpus(), 8);
7047 const unsigned long limit = 200000000; 7066 unsigned int factor;
7067
7068 switch (sysctl_sched_tunable_scaling) {
7069 case SCHED_TUNABLESCALING_NONE:
7070 factor = 1;
7071 break;
7072 case SCHED_TUNABLESCALING_LINEAR:
7073 factor = cpus;
7074 break;
7075 case SCHED_TUNABLESCALING_LOG:
7076 default:
7077 factor = 1 + ilog2(cpus);
7078 break;
7079 }
7048 7080
7049 sysctl_sched_min_granularity *= factor; 7081 return factor;
7050 if (sysctl_sched_min_granularity > limit) 7082}
7051 sysctl_sched_min_granularity = limit;
7052 7083
7053 sysctl_sched_latency *= factor; 7084static void update_sysctl(void)
7054 if (sysctl_sched_latency > limit) 7085{
7055 sysctl_sched_latency = limit; 7086 unsigned int factor = get_update_sysctl_factor();
7056 7087
7057 sysctl_sched_wakeup_granularity *= factor; 7088#define SET_SYSCTL(name) \
7089 (sysctl_##name = (factor) * normalized_sysctl_##name)
7090 SET_SYSCTL(sched_min_granularity);
7091 SET_SYSCTL(sched_latency);
7092 SET_SYSCTL(sched_wakeup_granularity);
7093 SET_SYSCTL(sched_shares_ratelimit);
7094#undef SET_SYSCTL
7095}
7058 7096
7059 sysctl_sched_shares_ratelimit *= factor; 7097static inline void sched_init_granularity(void)
7098{
7099 update_sysctl();
7060} 7100}
7061 7101
7062#ifdef CONFIG_SMP 7102#ifdef CONFIG_SMP
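get_update_sysctl_factor() and update_sysctl(), added above, scale each normalized_sysctl_* value by a factor derived from the online CPU count capped at 8: 1 for 'none' scaling, the CPU count for 'linear', and 1 + ilog2(cpus) for the default 'log' mode. A small standalone sketch of the same arithmetic:

	#include <stdio.h>

	enum scaling { SCALE_NONE, SCALE_LOG, SCALE_LINEAR };

	static unsigned int ilog2_u(unsigned int x)	/* floor(log2(x)), x >= 1 */
	{
		unsigned int r = 0;
		while (x >>= 1)
			r++;
		return r;
	}

	static unsigned int tunable_factor(unsigned int online_cpus, enum scaling mode)
	{
		unsigned int cpus = online_cpus < 8 ? online_cpus : 8;

		switch (mode) {
		case SCALE_NONE:	return 1;
		case SCALE_LINEAR:	return cpus;
		case SCALE_LOG:
		default:		return 1 + ilog2_u(cpus);
		}
	}

	int main(void)
	{
		/* e.g. a 4-CPU box with default log scaling: factor = 1 + log2(4) = 3,
		 * so sysctl_sched_latency = 3 * normalized_sysctl_sched_latency, etc. */
		printf("%u\n", tunable_factor(4, SCALE_LOG));
		return 0;
	}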
@@ -7092,8 +7132,24 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
7092 struct rq *rq; 7132 struct rq *rq;
7093 int ret = 0; 7133 int ret = 0;
7094 7134
7135 /*
7136 * Since we rely on wake-ups to migrate sleeping tasks, don't change
7137 * the ->cpus_allowed mask from under waking tasks, which would be
7138 * possible when we change rq->lock in ttwu(), so synchronize against
7139 * TASK_WAKING to avoid that.
7140 */
7141again:
7142 while (p->state == TASK_WAKING)
7143 cpu_relax();
7144
7095 rq = task_rq_lock(p, &flags); 7145 rq = task_rq_lock(p, &flags);
7096 if (!cpumask_intersects(new_mask, cpu_online_mask)) { 7146
7147 if (p->state == TASK_WAKING) {
7148 task_rq_unlock(rq, &flags);
7149 goto again;
7150 }
7151
7152 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
7097 ret = -EINVAL; 7153 ret = -EINVAL;
7098 goto out; 7154 goto out;
7099 } 7155 }
@@ -7115,7 +7171,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
7115 if (cpumask_test_cpu(task_cpu(p), new_mask)) 7171 if (cpumask_test_cpu(task_cpu(p), new_mask))
7116 goto out; 7172 goto out;
7117 7173
7118 if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { 7174 if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
7119 /* Need help from migration thread: drop lock and wait. */ 7175 /* Need help from migration thread: drop lock and wait. */
7120 struct task_struct *mt = rq->migration_thread; 7176 struct task_struct *mt = rq->migration_thread;
7121 7177
@@ -7148,7 +7204,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
7148static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 7204static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
7149{ 7205{
7150 struct rq *rq_dest, *rq_src; 7206 struct rq *rq_dest, *rq_src;
7151 int ret = 0, on_rq; 7207 int ret = 0;
7152 7208
7153 if (unlikely(!cpu_active(dest_cpu))) 7209 if (unlikely(!cpu_active(dest_cpu)))
7154 return ret; 7210 return ret;
@@ -7164,12 +7220,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
7164 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 7220 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
7165 goto fail; 7221 goto fail;
7166 7222
7167 on_rq = p->se.on_rq; 7223 /*
7168 if (on_rq) 7224 * If we're not on a rq, the next wake-up will ensure we're
7225 * placed properly.
7226 */
7227 if (p->se.on_rq) {
7169 deactivate_task(rq_src, p, 0); 7228 deactivate_task(rq_src, p, 0);
7170 7229 set_task_cpu(p, dest_cpu);
7171 set_task_cpu(p, dest_cpu);
7172 if (on_rq) {
7173 activate_task(rq_dest, p, 0); 7230 activate_task(rq_dest, p, 0);
7174 check_preempt_curr(rq_dest, p, 0); 7231 check_preempt_curr(rq_dest, p, 0);
7175 } 7232 }
@@ -7204,10 +7261,10 @@ static int migration_thread(void *data)
7204 struct migration_req *req; 7261 struct migration_req *req;
7205 struct list_head *head; 7262 struct list_head *head;
7206 7263
7207 spin_lock_irq(&rq->lock); 7264 raw_spin_lock_irq(&rq->lock);
7208 7265
7209 if (cpu_is_offline(cpu)) { 7266 if (cpu_is_offline(cpu)) {
7210 spin_unlock_irq(&rq->lock); 7267 raw_spin_unlock_irq(&rq->lock);
7211 break; 7268 break;
7212 } 7269 }
7213 7270
@@ -7219,7 +7276,7 @@ static int migration_thread(void *data)
7219 head = &rq->migration_queue; 7276 head = &rq->migration_queue;
7220 7277
7221 if (list_empty(head)) { 7278 if (list_empty(head)) {
7222 spin_unlock_irq(&rq->lock); 7279 raw_spin_unlock_irq(&rq->lock);
7223 schedule(); 7280 schedule();
7224 set_current_state(TASK_INTERRUPTIBLE); 7281 set_current_state(TASK_INTERRUPTIBLE);
7225 continue; 7282 continue;
@@ -7228,14 +7285,14 @@ static int migration_thread(void *data)
7228 list_del_init(head->next); 7285 list_del_init(head->next);
7229 7286
7230 if (req->task != NULL) { 7287 if (req->task != NULL) {
7231 spin_unlock(&rq->lock); 7288 raw_spin_unlock(&rq->lock);
7232 __migrate_task(req->task, cpu, req->dest_cpu); 7289 __migrate_task(req->task, cpu, req->dest_cpu);
7233 } else if (likely(cpu == (badcpu = smp_processor_id()))) { 7290 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
7234 req->dest_cpu = RCU_MIGRATION_GOT_QS; 7291 req->dest_cpu = RCU_MIGRATION_GOT_QS;
7235 spin_unlock(&rq->lock); 7292 raw_spin_unlock(&rq->lock);
7236 } else { 7293 } else {
7237 req->dest_cpu = RCU_MIGRATION_MUST_SYNC; 7294 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
7238 spin_unlock(&rq->lock); 7295 raw_spin_unlock(&rq->lock);
7239 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); 7296 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
7240 } 7297 }
7241 local_irq_enable(); 7298 local_irq_enable();
@@ -7265,37 +7322,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
7265static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 7322static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
7266{ 7323{
7267 int dest_cpu; 7324 int dest_cpu;
7268 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
7269 7325
7270again: 7326again:
7271 /* Look for allowed, online CPU in same node. */ 7327 dest_cpu = select_fallback_rq(dead_cpu, p);
7272 for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
7273 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
7274 goto move;
7275
7276 /* Any allowed, online CPU? */
7277 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
7278 if (dest_cpu < nr_cpu_ids)
7279 goto move;
7280 7328
7281 /* No more Mr. Nice Guy. */
7282 if (dest_cpu >= nr_cpu_ids) {
7283 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
7284 dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
7285
7286 /*
7287 * Don't tell them about moving exiting tasks or
7288 * kernel threads (both mm NULL), since they never
7289 * leave kernel.
7290 */
7291 if (p->mm && printk_ratelimit()) {
7292 printk(KERN_INFO "process %d (%s) no "
7293 "longer affine to cpu%d\n",
7294 task_pid_nr(p), p->comm, dead_cpu);
7295 }
7296 }
7297
7298move:
7299 /* It can have affinity changed while we were choosing. */ 7329 /* It can have affinity changed while we were choosing. */
7300 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) 7330 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
7301 goto again; 7331 goto again;
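
The removed lines above show the fallback order that now lives in select_fallback_rq(): an allowed online CPU in the same node, then any allowed online CPU, then a widened mask as a last resort. A compact sketch of that ordering over plain bitmasks; pick_fallback_cpu() and the mask parameters are illustrative only:

#include <stdio.h>

/* Lowest set bit index, or -1 if the mask is empty. */
static int first_cpu(unsigned long mask)
{
    int cpu;

    for (cpu = 0; cpu < (int)(8 * sizeof(mask)); cpu++)
        if (mask & (1UL << cpu))
            return cpu;
    return -1;
}

/*
 * Selection order mirrored from the removed lines above:
 *   1) an allowed, online CPU in the same node,
 *   2) any allowed, online CPU,
 *   3) last resort: widen the allowed set (the kernel consults the
 *      task's cpuset here) and take any online CPU.
 */
static int pick_fallback_cpu(unsigned long allowed, unsigned long online,
                             unsigned long node_mask)
{
    int cpu;

    cpu = first_cpu(allowed & online & node_mask);
    if (cpu >= 0)
        return cpu;

    cpu = first_cpu(allowed & online);
    if (cpu >= 0)
        return cpu;

    return first_cpu(online);
}

int main(void)
{
    /* allowed = {2,3}, online = {0,1,3}, node = {0,1} -> falls back to 3 */
    printf("%d\n", pick_fallback_cpu(0xcUL, 0xbUL, 0x3UL));
    return 0;
}
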
@@ -7310,7 +7340,7 @@ move:
7310 */ 7340 */
7311static void migrate_nr_uninterruptible(struct rq *rq_src) 7341static void migrate_nr_uninterruptible(struct rq *rq_src)
7312{ 7342{
7313 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); 7343 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
7314 unsigned long flags; 7344 unsigned long flags;
7315 7345
7316 local_irq_save(flags); 7346 local_irq_save(flags);
@@ -7358,14 +7388,14 @@ void sched_idle_next(void)
7358 * Strictly not necessary since rest of the CPUs are stopped by now 7388 * Strictly not necessary since rest of the CPUs are stopped by now
7359 * and interrupts disabled on the current cpu. 7389 * and interrupts disabled on the current cpu.
7360 */ 7390 */
7361 spin_lock_irqsave(&rq->lock, flags); 7391 raw_spin_lock_irqsave(&rq->lock, flags);
7362 7392
7363 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); 7393 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
7364 7394
7365 update_rq_clock(rq); 7395 update_rq_clock(rq);
7366 activate_task(rq, p, 0); 7396 activate_task(rq, p, 0);
7367 7397
7368 spin_unlock_irqrestore(&rq->lock, flags); 7398 raw_spin_unlock_irqrestore(&rq->lock, flags);
7369} 7399}
7370 7400
7371/* 7401/*
@@ -7401,9 +7431,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
7401 * that's OK. No task can be added to this CPU, so iteration is 7431 * that's OK. No task can be added to this CPU, so iteration is
7402 * fine. 7432 * fine.
7403 */ 7433 */
7404 spin_unlock_irq(&rq->lock); 7434 raw_spin_unlock_irq(&rq->lock);
7405 move_task_off_dead_cpu(dead_cpu, p); 7435 move_task_off_dead_cpu(dead_cpu, p);
7406 spin_lock_irq(&rq->lock); 7436 raw_spin_lock_irq(&rq->lock);
7407 7437
7408 put_task_struct(p); 7438 put_task_struct(p);
7409} 7439}
@@ -7563,7 +7593,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
7563static struct ctl_table_header *sd_sysctl_header; 7593static struct ctl_table_header *sd_sysctl_header;
7564static void register_sched_domain_sysctl(void) 7594static void register_sched_domain_sysctl(void)
7565{ 7595{
7566 int i, cpu_num = num_online_cpus(); 7596 int i, cpu_num = num_possible_cpus();
7567 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); 7597 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
7568 char buf[32]; 7598 char buf[32];
7569 7599
@@ -7573,7 +7603,7 @@ static void register_sched_domain_sysctl(void)
7573 if (entry == NULL) 7603 if (entry == NULL)
7574 return; 7604 return;
7575 7605
7576 for_each_online_cpu(i) { 7606 for_each_possible_cpu(i) {
7577 snprintf(buf, 32, "cpu%d", i); 7607 snprintf(buf, 32, "cpu%d", i);
7578 entry->procname = kstrdup(buf, GFP_KERNEL); 7608 entry->procname = kstrdup(buf, GFP_KERNEL);
7579 entry->mode = 0555; 7609 entry->mode = 0555;
@@ -7669,13 +7699,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7669 7699
7670 /* Update our root-domain */ 7700 /* Update our root-domain */
7671 rq = cpu_rq(cpu); 7701 rq = cpu_rq(cpu);
7672 spin_lock_irqsave(&rq->lock, flags); 7702 raw_spin_lock_irqsave(&rq->lock, flags);
7673 if (rq->rd) { 7703 if (rq->rd) {
7674 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7704 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7675 7705
7676 set_rq_online(rq); 7706 set_rq_online(rq);
7677 } 7707 }
7678 spin_unlock_irqrestore(&rq->lock, flags); 7708 raw_spin_unlock_irqrestore(&rq->lock, flags);
7679 break; 7709 break;
7680 7710
7681#ifdef CONFIG_HOTPLUG_CPU 7711#ifdef CONFIG_HOTPLUG_CPU
@@ -7700,14 +7730,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7700 put_task_struct(rq->migration_thread); 7730 put_task_struct(rq->migration_thread);
7701 rq->migration_thread = NULL; 7731 rq->migration_thread = NULL;
7702 /* Idle task back to normal (off runqueue, low prio) */ 7732 /* Idle task back to normal (off runqueue, low prio) */
7703 spin_lock_irq(&rq->lock); 7733 raw_spin_lock_irq(&rq->lock);
7704 update_rq_clock(rq); 7734 update_rq_clock(rq);
7705 deactivate_task(rq, rq->idle, 0); 7735 deactivate_task(rq, rq->idle, 0);
7706 rq->idle->static_prio = MAX_PRIO;
7707 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); 7736 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
7708 rq->idle->sched_class = &idle_sched_class; 7737 rq->idle->sched_class = &idle_sched_class;
7709 migrate_dead_tasks(cpu); 7738 migrate_dead_tasks(cpu);
7710 spin_unlock_irq(&rq->lock); 7739 raw_spin_unlock_irq(&rq->lock);
7711 cpuset_unlock(); 7740 cpuset_unlock();
7712 migrate_nr_uninterruptible(rq); 7741 migrate_nr_uninterruptible(rq);
7713 BUG_ON(rq->nr_running != 0); 7742 BUG_ON(rq->nr_running != 0);
@@ -7717,30 +7746,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7717 * they didn't take sched_hotcpu_mutex. Just wake up 7746 * they didn't take sched_hotcpu_mutex. Just wake up
7718 * the requestors. 7747 * the requestors.
7719 */ 7748 */
7720 spin_lock_irq(&rq->lock); 7749 raw_spin_lock_irq(&rq->lock);
7721 while (!list_empty(&rq->migration_queue)) { 7750 while (!list_empty(&rq->migration_queue)) {
7722 struct migration_req *req; 7751 struct migration_req *req;
7723 7752
7724 req = list_entry(rq->migration_queue.next, 7753 req = list_entry(rq->migration_queue.next,
7725 struct migration_req, list); 7754 struct migration_req, list);
7726 list_del_init(&req->list); 7755 list_del_init(&req->list);
7727 spin_unlock_irq(&rq->lock); 7756 raw_spin_unlock_irq(&rq->lock);
7728 complete(&req->done); 7757 complete(&req->done);
7729 spin_lock_irq(&rq->lock); 7758 raw_spin_lock_irq(&rq->lock);
7730 } 7759 }
7731 spin_unlock_irq(&rq->lock); 7760 raw_spin_unlock_irq(&rq->lock);
7732 break; 7761 break;
7733 7762
7734 case CPU_DYING: 7763 case CPU_DYING:
7735 case CPU_DYING_FROZEN: 7764 case CPU_DYING_FROZEN:
7736 /* Update our root-domain */ 7765 /* Update our root-domain */
7737 rq = cpu_rq(cpu); 7766 rq = cpu_rq(cpu);
7738 spin_lock_irqsave(&rq->lock, flags); 7767 raw_spin_lock_irqsave(&rq->lock, flags);
7739 if (rq->rd) { 7768 if (rq->rd) {
7740 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7769 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7741 set_rq_offline(rq); 7770 set_rq_offline(rq);
7742 } 7771 }
7743 spin_unlock_irqrestore(&rq->lock, flags); 7772 raw_spin_unlock_irqrestore(&rq->lock, flags);
7744 break; 7773 break;
7745#endif 7774#endif
7746 } 7775 }
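
The CPU_DEAD branch above drains rq->migration_queue by dropping the (now raw) runqueue lock around complete(&req->done) for each pending request. A small pthread sketch of that complete()/wait_for_completion() handshake; the struct completion here is a userspace stand-in, not the kernel's:

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's struct completion. */
struct completion {
    pthread_mutex_t lock;
    pthread_cond_t wait;
    int done;
};

static void init_completion(struct completion *c)
{
    pthread_mutex_init(&c->lock, NULL);
    pthread_cond_init(&c->wait, NULL);
    c->done = 0;
}

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = 1;
    pthread_cond_signal(&c->wait);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->wait, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

static struct completion req_done;

static void *requestor(void *arg)
{
    (void)arg;
    wait_for_completion(&req_done);    /* like the waiter on req->done */
    return NULL;
}

int main(void)
{
    pthread_t t;

    init_completion(&req_done);
    pthread_create(&t, NULL, requestor, NULL);
    complete(&req_done);               /* like complete(&req->done) above */
    pthread_join(t, NULL);
    puts("request completed");
    return 0;
}
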
@@ -7970,7 +7999,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7970 struct root_domain *old_rd = NULL; 7999 struct root_domain *old_rd = NULL;
7971 unsigned long flags; 8000 unsigned long flags;
7972 8001
7973 spin_lock_irqsave(&rq->lock, flags); 8002 raw_spin_lock_irqsave(&rq->lock, flags);
7974 8003
7975 if (rq->rd) { 8004 if (rq->rd) {
7976 old_rd = rq->rd; 8005 old_rd = rq->rd;
@@ -7996,7 +8025,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7996 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 8025 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
7997 set_rq_online(rq); 8026 set_rq_online(rq);
7998 8027
7999 spin_unlock_irqrestore(&rq->lock, flags); 8028 raw_spin_unlock_irqrestore(&rq->lock, flags);
8000 8029
8001 if (old_rd) 8030 if (old_rd)
8002 free_rootdomain(old_rd); 8031 free_rootdomain(old_rd);
@@ -8282,14 +8311,14 @@ enum s_alloc {
8282 */ 8311 */
8283#ifdef CONFIG_SCHED_SMT 8312#ifdef CONFIG_SCHED_SMT
8284static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); 8313static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
8285static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); 8314static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
8286 8315
8287static int 8316static int
8288cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, 8317cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
8289 struct sched_group **sg, struct cpumask *unused) 8318 struct sched_group **sg, struct cpumask *unused)
8290{ 8319{
8291 if (sg) 8320 if (sg)
8292 *sg = &per_cpu(sched_group_cpus, cpu).sg; 8321 *sg = &per_cpu(sched_groups, cpu).sg;
8293 return cpu; 8322 return cpu;
8294} 8323}
8295#endif /* CONFIG_SCHED_SMT */ 8324#endif /* CONFIG_SCHED_SMT */
@@ -9099,7 +9128,7 @@ match1:
9099 if (doms_new == NULL) { 9128 if (doms_new == NULL) {
9100 ndoms_cur = 0; 9129 ndoms_cur = 0;
9101 doms_new = &fallback_doms; 9130 doms_new = &fallback_doms;
9102 cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map); 9131 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
9103 WARN_ON_ONCE(dattr_new); 9132 WARN_ON_ONCE(dattr_new);
9104 } 9133 }
9105 9134
@@ -9230,8 +9259,10 @@ static int update_sched_domains(struct notifier_block *nfb,
9230 switch (action) { 9259 switch (action) {
9231 case CPU_ONLINE: 9260 case CPU_ONLINE:
9232 case CPU_ONLINE_FROZEN: 9261 case CPU_ONLINE_FROZEN:
9233 case CPU_DEAD: 9262 case CPU_DOWN_PREPARE:
9234 case CPU_DEAD_FROZEN: 9263 case CPU_DOWN_PREPARE_FROZEN:
9264 case CPU_DOWN_FAILED:
9265 case CPU_DOWN_FAILED_FROZEN:
9235 partition_sched_domains(1, NULL, NULL); 9266 partition_sched_domains(1, NULL, NULL);
9236 return NOTIFY_OK; 9267 return NOTIFY_OK;
9237 9268
@@ -9278,7 +9309,7 @@ void __init sched_init_smp(void)
9278#endif 9309#endif
9279 get_online_cpus(); 9310 get_online_cpus();
9280 mutex_lock(&sched_domains_mutex); 9311 mutex_lock(&sched_domains_mutex);
9281 arch_init_sched_domains(cpu_online_mask); 9312 arch_init_sched_domains(cpu_active_mask);
9282 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 9313 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
9283 if (cpumask_empty(non_isolated_cpus)) 9314 if (cpumask_empty(non_isolated_cpus))
9284 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 9315 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -9351,13 +9382,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
9351#ifdef CONFIG_SMP 9382#ifdef CONFIG_SMP
9352 rt_rq->rt_nr_migratory = 0; 9383 rt_rq->rt_nr_migratory = 0;
9353 rt_rq->overloaded = 0; 9384 rt_rq->overloaded = 0;
9354 plist_head_init(&rt_rq->pushable_tasks, &rq->lock); 9385 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
9355#endif 9386#endif
9356 9387
9357 rt_rq->rt_time = 0; 9388 rt_rq->rt_time = 0;
9358 rt_rq->rt_throttled = 0; 9389 rt_rq->rt_throttled = 0;
9359 rt_rq->rt_runtime = 0; 9390 rt_rq->rt_runtime = 0;
9360 spin_lock_init(&rt_rq->rt_runtime_lock); 9391 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
9361 9392
9362#ifdef CONFIG_RT_GROUP_SCHED 9393#ifdef CONFIG_RT_GROUP_SCHED
9363 rt_rq->rt_nr_boosted = 0; 9394 rt_rq->rt_nr_boosted = 0;
@@ -9517,7 +9548,7 @@ void __init sched_init(void)
9517 struct rq *rq; 9548 struct rq *rq;
9518 9549
9519 rq = cpu_rq(i); 9550 rq = cpu_rq(i);
9520 spin_lock_init(&rq->lock); 9551 raw_spin_lock_init(&rq->lock);
9521 rq->nr_running = 0; 9552 rq->nr_running = 0;
9522 rq->calc_load_active = 0; 9553 rq->calc_load_active = 0;
9523 rq->calc_load_update = jiffies + LOAD_FREQ; 9554 rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9577,7 +9608,7 @@ void __init sched_init(void)
9577#elif defined CONFIG_USER_SCHED 9608#elif defined CONFIG_USER_SCHED
9578 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); 9609 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
9579 init_tg_rt_entry(&init_task_group, 9610 init_tg_rt_entry(&init_task_group,
9580 &per_cpu(init_rt_rq, i), 9611 &per_cpu(init_rt_rq_var, i),
9581 &per_cpu(init_sched_rt_entity, i), i, 1, 9612 &per_cpu(init_sched_rt_entity, i), i, 1,
9582 root_task_group.rt_se[i]); 9613 root_task_group.rt_se[i]);
9583#endif 9614#endif
@@ -9615,7 +9646,7 @@ void __init sched_init(void)
9615#endif 9646#endif
9616 9647
9617#ifdef CONFIG_RT_MUTEXES 9648#ifdef CONFIG_RT_MUTEXES
9618 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); 9649 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
9619#endif 9650#endif
9620 9651
9621 /* 9652 /*
@@ -9659,7 +9690,7 @@ void __init sched_init(void)
9659#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 9690#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
9660static inline int preempt_count_equals(int preempt_offset) 9691static inline int preempt_count_equals(int preempt_offset)
9661{ 9692{
9662 int nested = preempt_count() & ~PREEMPT_ACTIVE; 9693 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
9663 9694
9664 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); 9695 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
9665} 9696}
@@ -9740,13 +9771,13 @@ void normalize_rt_tasks(void)
9740 continue; 9771 continue;
9741 } 9772 }
9742 9773
9743 spin_lock(&p->pi_lock); 9774 raw_spin_lock(&p->pi_lock);
9744 rq = __task_rq_lock(p); 9775 rq = __task_rq_lock(p);
9745 9776
9746 normalize_task(rq, p); 9777 normalize_task(rq, p);
9747 9778
9748 __task_rq_unlock(rq); 9779 __task_rq_unlock(rq);
9749 spin_unlock(&p->pi_lock); 9780 raw_spin_unlock(&p->pi_lock);
9750 } while_each_thread(g, p); 9781 } while_each_thread(g, p);
9751 9782
9752 read_unlock_irqrestore(&tasklist_lock, flags); 9783 read_unlock_irqrestore(&tasklist_lock, flags);
@@ -9842,13 +9873,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
9842 se = kzalloc_node(sizeof(struct sched_entity), 9873 se = kzalloc_node(sizeof(struct sched_entity),
9843 GFP_KERNEL, cpu_to_node(i)); 9874 GFP_KERNEL, cpu_to_node(i));
9844 if (!se) 9875 if (!se)
9845 goto err; 9876 goto err_free_rq;
9846 9877
9847 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); 9878 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
9848 } 9879 }
9849 9880
9850 return 1; 9881 return 1;
9851 9882
9883 err_free_rq:
9884 kfree(cfs_rq);
9852 err: 9885 err:
9853 return 0; 9886 return 0;
9854} 9887}
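
The hunk above turns a bare goto err into goto err_free_rq so the cfs_rq allocated earlier in the same loop iteration is freed when the sched_entity allocation fails (the rt variant below gets the same treatment). A minimal sketch of that unwind-by-label pattern with plain malloc()/free(); the struct names are placeholders:

#include <stdlib.h>

struct cfs_rq { int dummy; };
struct sched_entity { int dummy; };

/*
 * Mirrors the error paths above: if the second allocation fails, the
 * first one from the same iteration is freed before bailing out.
 */
static int alloc_group_cpu(struct cfs_rq **rqp, struct sched_entity **sep)
{
    struct cfs_rq *cfs_rq;
    struct sched_entity *se;

    cfs_rq = malloc(sizeof(*cfs_rq));
    if (!cfs_rq)
        goto err;

    se = malloc(sizeof(*se));
    if (!se)
        goto err_free_rq;

    *rqp = cfs_rq;
    *sep = se;
    return 1;

err_free_rq:
    free(cfs_rq);
err:
    return 0;
}

int main(void)
{
    struct cfs_rq *rq;
    struct sched_entity *se;

    return alloc_group_cpu(&rq, &se) ? 0 : 1;
}
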
@@ -9930,13 +9963,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
9930 rt_se = kzalloc_node(sizeof(struct sched_rt_entity), 9963 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
9931 GFP_KERNEL, cpu_to_node(i)); 9964 GFP_KERNEL, cpu_to_node(i));
9932 if (!rt_se) 9965 if (!rt_se)
9933 goto err; 9966 goto err_free_rq;
9934 9967
9935 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); 9968 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
9936 } 9969 }
9937 9970
9938 return 1; 9971 return 1;
9939 9972
9973 err_free_rq:
9974 kfree(rt_rq);
9940 err: 9975 err:
9941 return 0; 9976 return 0;
9942} 9977}
@@ -10070,7 +10105,7 @@ void sched_move_task(struct task_struct *tsk)
10070 10105
10071#ifdef CONFIG_FAIR_GROUP_SCHED 10106#ifdef CONFIG_FAIR_GROUP_SCHED
10072 if (tsk->sched_class->moved_group) 10107 if (tsk->sched_class->moved_group)
10073 tsk->sched_class->moved_group(tsk); 10108 tsk->sched_class->moved_group(tsk, on_rq);
10074#endif 10109#endif
10075 10110
10076 if (unlikely(running)) 10111 if (unlikely(running))
@@ -10105,9 +10140,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
10105 struct rq *rq = cfs_rq->rq; 10140 struct rq *rq = cfs_rq->rq;
10106 unsigned long flags; 10141 unsigned long flags;
10107 10142
10108 spin_lock_irqsave(&rq->lock, flags); 10143 raw_spin_lock_irqsave(&rq->lock, flags);
10109 __set_se_shares(se, shares); 10144 __set_se_shares(se, shares);
10110 spin_unlock_irqrestore(&rq->lock, flags); 10145 raw_spin_unlock_irqrestore(&rq->lock, flags);
10111} 10146}
10112 10147
10113static DEFINE_MUTEX(shares_mutex); 10148static DEFINE_MUTEX(shares_mutex);
@@ -10292,18 +10327,18 @@ static int tg_set_bandwidth(struct task_group *tg,
10292 if (err) 10327 if (err)
10293 goto unlock; 10328 goto unlock;
10294 10329
10295 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10330 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10296 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 10331 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
10297 tg->rt_bandwidth.rt_runtime = rt_runtime; 10332 tg->rt_bandwidth.rt_runtime = rt_runtime;
10298 10333
10299 for_each_possible_cpu(i) { 10334 for_each_possible_cpu(i) {
10300 struct rt_rq *rt_rq = tg->rt_rq[i]; 10335 struct rt_rq *rt_rq = tg->rt_rq[i];
10301 10336
10302 spin_lock(&rt_rq->rt_runtime_lock); 10337 raw_spin_lock(&rt_rq->rt_runtime_lock);
10303 rt_rq->rt_runtime = rt_runtime; 10338 rt_rq->rt_runtime = rt_runtime;
10304 spin_unlock(&rt_rq->rt_runtime_lock); 10339 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10305 } 10340 }
10306 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10341 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10307 unlock: 10342 unlock:
10308 read_unlock(&tasklist_lock); 10343 read_unlock(&tasklist_lock);
10309 mutex_unlock(&rt_constraints_mutex); 10344 mutex_unlock(&rt_constraints_mutex);
@@ -10408,15 +10443,15 @@ static int sched_rt_global_constraints(void)
10408 if (sysctl_sched_rt_runtime == 0) 10443 if (sysctl_sched_rt_runtime == 0)
10409 return -EBUSY; 10444 return -EBUSY;
10410 10445
10411 spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 10446 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
10412 for_each_possible_cpu(i) { 10447 for_each_possible_cpu(i) {
10413 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 10448 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
10414 10449
10415 spin_lock(&rt_rq->rt_runtime_lock); 10450 raw_spin_lock(&rt_rq->rt_runtime_lock);
10416 rt_rq->rt_runtime = global_rt_runtime(); 10451 rt_rq->rt_runtime = global_rt_runtime();
10417 spin_unlock(&rt_rq->rt_runtime_lock); 10452 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10418 } 10453 }
10419 spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 10454 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
10420 10455
10421 return 0; 10456 return 0;
10422} 10457}
@@ -10707,9 +10742,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
10707 /* 10742 /*
10708 * Take rq->lock to make 64-bit read safe on 32-bit platforms. 10743 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
10709 */ 10744 */
10710 spin_lock_irq(&cpu_rq(cpu)->lock); 10745 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10711 data = *cpuusage; 10746 data = *cpuusage;
10712 spin_unlock_irq(&cpu_rq(cpu)->lock); 10747 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10713#else 10748#else
10714 data = *cpuusage; 10749 data = *cpuusage;
10715#endif 10750#endif
@@ -10725,9 +10760,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
10725 /* 10760 /*
10726 * Take rq->lock to make 64-bit write safe on 32-bit platforms. 10761 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
10727 */ 10762 */
10728 spin_lock_irq(&cpu_rq(cpu)->lock); 10763 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10729 *cpuusage = val; 10764 *cpuusage = val;
10730 spin_unlock_irq(&cpu_rq(cpu)->lock); 10765 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10731#else 10766#else
10732 *cpuusage = val; 10767 *cpuusage = val;
10733#endif 10768#endif
@@ -10961,9 +10996,9 @@ void synchronize_sched_expedited(void)
10961 init_completion(&req->done); 10996 init_completion(&req->done);
10962 req->task = NULL; 10997 req->task = NULL;
10963 req->dest_cpu = RCU_MIGRATION_NEED_QS; 10998 req->dest_cpu = RCU_MIGRATION_NEED_QS;
10964 spin_lock_irqsave(&rq->lock, flags); 10999 raw_spin_lock_irqsave(&rq->lock, flags);
10965 list_add(&req->list, &rq->migration_queue); 11000 list_add(&req->list, &rq->migration_queue);
10966 spin_unlock_irqrestore(&rq->lock, flags); 11001 raw_spin_unlock_irqrestore(&rq->lock, flags);
10967 wake_up_process(rq->migration_thread); 11002 wake_up_process(rq->migration_thread);
10968 } 11003 }
10969 for_each_online_cpu(cpu) { 11004 for_each_online_cpu(cpu) {
@@ -10971,11 +11006,11 @@ void synchronize_sched_expedited(void)
10971 req = &per_cpu(rcu_migration_req, cpu); 11006 req = &per_cpu(rcu_migration_req, cpu);
10972 rq = cpu_rq(cpu); 11007 rq = cpu_rq(cpu);
10973 wait_for_completion(&req->done); 11008 wait_for_completion(&req->done);
10974 spin_lock_irqsave(&rq->lock, flags); 11009 raw_spin_lock_irqsave(&rq->lock, flags);
10975 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) 11010 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
10976 need_full_sync = 1; 11011 need_full_sync = 1;
10977 req->dest_cpu = RCU_MIGRATION_IDLE; 11012 req->dest_cpu = RCU_MIGRATION_IDLE;
10978 spin_unlock_irqrestore(&rq->lock, flags); 11013 raw_spin_unlock_irqrestore(&rq->lock, flags);
10979 } 11014 }
10980 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; 11015 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10981 synchronize_sched_expedited_count++; 11016 synchronize_sched_expedited_count++;