Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 454
1 file changed, 233 insertions, 221 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index aa31244caa9f..18cceeecce35 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array { | |||
141 | 141 | ||
142 | struct rt_bandwidth { | 142 | struct rt_bandwidth { |
143 | /* nests inside the rq lock: */ | 143 | /* nests inside the rq lock: */ |
144 | spinlock_t rt_runtime_lock; | 144 | raw_spinlock_t rt_runtime_lock; |
145 | ktime_t rt_period; | 145 | ktime_t rt_period; |
146 | u64 rt_runtime; | 146 | u64 rt_runtime; |
147 | struct hrtimer rt_period_timer; | 147 | struct hrtimer rt_period_timer; |
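This first hunk sets the pattern for most of the patch: spinlock_t becomes raw_spinlock_t, and every spin_*() call on these locks becomes the matching raw_spin_*() variant. The motive, presumably, is PREEMPT_RT preparation: there an ordinary spinlock_t can turn into a sleeping lock, while the rq and rt_runtime locks sit at the core of the scheduler and must keep spinning. A minimal kernel-context sketch of the converted idiom, not tied to this file:

    #include <linux/spinlock.h>

    /* A lock that must never sleep, even on PREEMPT_RT. */
    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_critical_section(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            /* ... touch state shared with scheduler/IRQ context ... */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }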
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
178 | rt_b->rt_period = ns_to_ktime(period); | 178 | rt_b->rt_period = ns_to_ktime(period); |
179 | rt_b->rt_runtime = runtime; | 179 | rt_b->rt_runtime = runtime; |
180 | 180 | ||
181 | spin_lock_init(&rt_b->rt_runtime_lock); | 181 | raw_spin_lock_init(&rt_b->rt_runtime_lock); |
182 | 182 | ||
183 | hrtimer_init(&rt_b->rt_period_timer, | 183 | hrtimer_init(&rt_b->rt_period_timer, |
184 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 184 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | |||
200 | if (hrtimer_active(&rt_b->rt_period_timer)) | 200 | if (hrtimer_active(&rt_b->rt_period_timer)) |
201 | return; | 201 | return; |
202 | 202 | ||
203 | spin_lock(&rt_b->rt_runtime_lock); | 203 | raw_spin_lock(&rt_b->rt_runtime_lock); |
204 | for (;;) { | 204 | for (;;) { |
205 | unsigned long delta; | 205 | unsigned long delta; |
206 | ktime_t soft, hard; | 206 | ktime_t soft, hard; |
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | |||
217 | __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, | 217 | __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, |
218 | HRTIMER_MODE_ABS_PINNED, 0); | 218 | HRTIMER_MODE_ABS_PINNED, 0); |
219 | } | 219 | } |
220 | spin_unlock(&rt_b->rt_runtime_lock); | 220 | raw_spin_unlock(&rt_b->rt_runtime_lock); |
221 | } | 221 | } |
222 | 222 | ||
223 | #ifdef CONFIG_RT_GROUP_SCHED | 223 | #ifdef CONFIG_RT_GROUP_SCHED |
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq); | |||
298 | 298 | ||
299 | #ifdef CONFIG_RT_GROUP_SCHED | 299 | #ifdef CONFIG_RT_GROUP_SCHED |
300 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); | 300 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); |
301 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq); | 301 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var); |
302 | #endif /* CONFIG_RT_GROUP_SCHED */ | 302 | #endif /* CONFIG_RT_GROUP_SCHED */ |
303 | #else /* !CONFIG_USER_SCHED */ | 303 | #else /* !CONFIG_USER_SCHED */ |
304 | #define root_task_group init_task_group | 304 | #define root_task_group init_task_group |
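The init_rt_rq -> init_rt_rq_var rename (and the later sched_group_cpus -> sched_groups one near the end of this diff) is not locking work; the likely motive is that the per-cpu variable's name collides with a function of the same name once per-cpu variables move into the ordinary symbol namespace. A sketch of the clash being avoided, assuming the init_rt_rq() function defined elsewhere in sched.c:

    #include <linux/percpu.h>

    /* These two definitions want the same symbol name once per-cpu
     * variables lose their separate prefix, hence the rename. */
    static DEFINE_PER_CPU(struct rt_rq, init_rt_rq);              /* data */
    static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);   /* code */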
@@ -470,7 +470,7 @@ struct rt_rq { | |||
470 | u64 rt_time; | 470 | u64 rt_time; |
471 | u64 rt_runtime; | 471 | u64 rt_runtime; |
472 | /* Nests inside the rq lock: */ | 472 | /* Nests inside the rq lock: */ |
473 | spinlock_t rt_runtime_lock; | 473 | raw_spinlock_t rt_runtime_lock; |
474 | 474 | ||
475 | #ifdef CONFIG_RT_GROUP_SCHED | 475 | #ifdef CONFIG_RT_GROUP_SCHED |
476 | unsigned long rt_nr_boosted; | 476 | unsigned long rt_nr_boosted; |
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain; | |||
525 | */ | 525 | */ |
526 | struct rq { | 526 | struct rq { |
527 | /* runqueue lock: */ | 527 | /* runqueue lock: */ |
528 | spinlock_t lock; | 528 | raw_spinlock_t lock; |
529 | 529 | ||
530 | /* | 530 | /* |
531 | * nr_running and cpu_load should be in the same cacheline because | 531 | * nr_running and cpu_load should be in the same cacheline because |
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq) | |||
685 | */ | 685 | */ |
686 | int runqueue_is_locked(int cpu) | 686 | int runqueue_is_locked(int cpu) |
687 | { | 687 | { |
688 | return spin_is_locked(&cpu_rq(cpu)->lock); | 688 | return raw_spin_is_locked(&cpu_rq(cpu)->lock); |
689 | } | 689 | } |
690 | 690 | ||
691 | /* | 691 | /* |
@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; | |||
814 | * default: 0.25ms | 814 | * default: 0.25ms |
815 | */ | 815 | */ |
816 | unsigned int sysctl_sched_shares_ratelimit = 250000; | 816 | unsigned int sysctl_sched_shares_ratelimit = 250000; |
817 | unsigned int normalized_sysctl_sched_shares_ratelimit = 250000; | ||
817 | 818 | ||
818 | /* | 819 | /* |
819 | * Inject some fuzzyness into changing the per-cpu group shares | 820 | * Inject some fuzzyness into changing the per-cpu group shares |
@@ -892,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
892 | */ | 893 | */ |
893 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); | 894 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); |
894 | 895 | ||
895 | spin_unlock_irq(&rq->lock); | 896 | raw_spin_unlock_irq(&rq->lock); |
896 | } | 897 | } |
897 | 898 | ||
898 | #else /* __ARCH_WANT_UNLOCKED_CTXSW */ | 899 | #else /* __ARCH_WANT_UNLOCKED_CTXSW */ |
@@ -916,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) | |||
916 | next->oncpu = 1; | 917 | next->oncpu = 1; |
917 | #endif | 918 | #endif |
918 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | 919 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
919 | spin_unlock_irq(&rq->lock); | 920 | raw_spin_unlock_irq(&rq->lock); |
920 | #else | 921 | #else |
921 | spin_unlock(&rq->lock); | 922 | raw_spin_unlock(&rq->lock); |
922 | #endif | 923 | #endif |
923 | } | 924 | } |
924 | 925 | ||
@@ -948,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p) | |||
948 | { | 949 | { |
949 | for (;;) { | 950 | for (;;) { |
950 | struct rq *rq = task_rq(p); | 951 | struct rq *rq = task_rq(p); |
951 | spin_lock(&rq->lock); | 952 | raw_spin_lock(&rq->lock); |
952 | if (likely(rq == task_rq(p))) | 953 | if (likely(rq == task_rq(p))) |
953 | return rq; | 954 | return rq; |
954 | spin_unlock(&rq->lock); | 955 | raw_spin_unlock(&rq->lock); |
955 | } | 956 | } |
956 | } | 957 | } |
957 | 958 | ||
@@ -968,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) | |||
968 | for (;;) { | 969 | for (;;) { |
969 | local_irq_save(*flags); | 970 | local_irq_save(*flags); |
970 | rq = task_rq(p); | 971 | rq = task_rq(p); |
971 | spin_lock(&rq->lock); | 972 | raw_spin_lock(&rq->lock); |
972 | if (likely(rq == task_rq(p))) | 973 | if (likely(rq == task_rq(p))) |
973 | return rq; | 974 | return rq; |
974 | spin_unlock_irqrestore(&rq->lock, *flags); | 975 | raw_spin_unlock_irqrestore(&rq->lock, *flags); |
975 | } | 976 | } |
976 | } | 977 | } |
977 | 978 | ||
@@ -980,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p) | |||
980 | struct rq *rq = task_rq(p); | 981 | struct rq *rq = task_rq(p); |
981 | 982 | ||
982 | smp_mb(); /* spin-unlock-wait is not a full memory barrier */ | 983 | smp_mb(); /* spin-unlock-wait is not a full memory barrier */ |
983 | spin_unlock_wait(&rq->lock); | 984 | raw_spin_unlock_wait(&rq->lock); |
984 | } | 985 | } |
985 | 986 | ||
986 | static void __task_rq_unlock(struct rq *rq) | 987 | static void __task_rq_unlock(struct rq *rq) |
987 | __releases(rq->lock) | 988 | __releases(rq->lock) |
988 | { | 989 | { |
989 | spin_unlock(&rq->lock); | 990 | raw_spin_unlock(&rq->lock); |
990 | } | 991 | } |
991 | 992 | ||
992 | static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) | 993 | static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) |
993 | __releases(rq->lock) | 994 | __releases(rq->lock) |
994 | { | 995 | { |
995 | spin_unlock_irqrestore(&rq->lock, *flags); | 996 | raw_spin_unlock_irqrestore(&rq->lock, *flags); |
996 | } | 997 | } |
997 | 998 | ||
998 | /* | 999 | /* |
@@ -1005,7 +1006,7 @@ static struct rq *this_rq_lock(void) | |||
1005 | 1006 | ||
1006 | local_irq_disable(); | 1007 | local_irq_disable(); |
1007 | rq = this_rq(); | 1008 | rq = this_rq(); |
1008 | spin_lock(&rq->lock); | 1009 | raw_spin_lock(&rq->lock); |
1009 | 1010 | ||
1010 | return rq; | 1011 | return rq; |
1011 | } | 1012 | } |
@@ -1052,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer) | |||
1052 | 1053 | ||
1053 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); | 1054 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); |
1054 | 1055 | ||
1055 | spin_lock(&rq->lock); | 1056 | raw_spin_lock(&rq->lock); |
1056 | update_rq_clock(rq); | 1057 | update_rq_clock(rq); |
1057 | rq->curr->sched_class->task_tick(rq, rq->curr, 1); | 1058 | rq->curr->sched_class->task_tick(rq, rq->curr, 1); |
1058 | spin_unlock(&rq->lock); | 1059 | raw_spin_unlock(&rq->lock); |
1059 | 1060 | ||
1060 | return HRTIMER_NORESTART; | 1061 | return HRTIMER_NORESTART; |
1061 | } | 1062 | } |
@@ -1068,10 +1069,10 @@ static void __hrtick_start(void *arg) | |||
1068 | { | 1069 | { |
1069 | struct rq *rq = arg; | 1070 | struct rq *rq = arg; |
1070 | 1071 | ||
1071 | spin_lock(&rq->lock); | 1072 | raw_spin_lock(&rq->lock); |
1072 | hrtimer_restart(&rq->hrtick_timer); | 1073 | hrtimer_restart(&rq->hrtick_timer); |
1073 | rq->hrtick_csd_pending = 0; | 1074 | rq->hrtick_csd_pending = 0; |
1074 | spin_unlock(&rq->lock); | 1075 | raw_spin_unlock(&rq->lock); |
1075 | } | 1076 | } |
1076 | 1077 | ||
1077 | /* | 1078 | /* |
@@ -1178,7 +1179,7 @@ static void resched_task(struct task_struct *p) | |||
1178 | { | 1179 | { |
1179 | int cpu; | 1180 | int cpu; |
1180 | 1181 | ||
1181 | assert_spin_locked(&task_rq(p)->lock); | 1182 | assert_raw_spin_locked(&task_rq(p)->lock); |
1182 | 1183 | ||
1183 | if (test_tsk_need_resched(p)) | 1184 | if (test_tsk_need_resched(p)) |
1184 | return; | 1185 | return; |
@@ -1200,10 +1201,10 @@ static void resched_cpu(int cpu) | |||
1200 | struct rq *rq = cpu_rq(cpu); | 1201 | struct rq *rq = cpu_rq(cpu); |
1201 | unsigned long flags; | 1202 | unsigned long flags; |
1202 | 1203 | ||
1203 | if (!spin_trylock_irqsave(&rq->lock, flags)) | 1204 | if (!raw_spin_trylock_irqsave(&rq->lock, flags)) |
1204 | return; | 1205 | return; |
1205 | resched_task(cpu_curr(cpu)); | 1206 | resched_task(cpu_curr(cpu)); |
1206 | spin_unlock_irqrestore(&rq->lock, flags); | 1207 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
1207 | } | 1208 | } |
1208 | 1209 | ||
1209 | #ifdef CONFIG_NO_HZ | 1210 | #ifdef CONFIG_NO_HZ |
@@ -1272,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) | |||
1272 | #else /* !CONFIG_SMP */ | 1273 | #else /* !CONFIG_SMP */ |
1273 | static void resched_task(struct task_struct *p) | 1274 | static void resched_task(struct task_struct *p) |
1274 | { | 1275 | { |
1275 | assert_spin_locked(&task_rq(p)->lock); | 1276 | assert_raw_spin_locked(&task_rq(p)->lock); |
1276 | set_tsk_need_resched(p); | 1277 | set_tsk_need_resched(p); |
1277 | } | 1278 | } |
1278 | 1279 | ||
@@ -1599,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1599 | struct rq *rq = cpu_rq(cpu); | 1600 | struct rq *rq = cpu_rq(cpu); |
1600 | unsigned long flags; | 1601 | unsigned long flags; |
1601 | 1602 | ||
1602 | spin_lock_irqsave(&rq->lock, flags); | 1603 | raw_spin_lock_irqsave(&rq->lock, flags); |
1603 | tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; | 1604 | tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; |
1604 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; | 1605 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; |
1605 | __set_se_shares(tg->se[cpu], shares); | 1606 | __set_se_shares(tg->se[cpu], shares); |
1606 | spin_unlock_irqrestore(&rq->lock, flags); | 1607 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
1607 | } | 1608 | } |
1608 | } | 1609 | } |
1609 | 1610 | ||
@@ -1614,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1614 | */ | 1615 | */ |
1615 | static int tg_shares_up(struct task_group *tg, void *data) | 1616 | static int tg_shares_up(struct task_group *tg, void *data) |
1616 | { | 1617 | { |
1617 | unsigned long weight, rq_weight = 0, shares = 0; | 1618 | unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0; |
1618 | unsigned long *usd_rq_weight; | 1619 | unsigned long *usd_rq_weight; |
1619 | struct sched_domain *sd = data; | 1620 | struct sched_domain *sd = data; |
1620 | unsigned long flags; | 1621 | unsigned long flags; |
@@ -1630,6 +1631,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1630 | weight = tg->cfs_rq[i]->load.weight; | 1631 | weight = tg->cfs_rq[i]->load.weight; |
1631 | usd_rq_weight[i] = weight; | 1632 | usd_rq_weight[i] = weight; |
1632 | 1633 | ||
1634 | rq_weight += weight; | ||
1633 | /* | 1635 | /* |
1634 | * If there are currently no tasks on the cpu pretend there | 1636 | * If there are currently no tasks on the cpu pretend there |
1635 | * is one of average load so that when a new task gets to | 1637 | * is one of average load so that when a new task gets to |
@@ -1638,10 +1640,13 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1638 | if (!weight) | 1640 | if (!weight) |
1639 | weight = NICE_0_LOAD; | 1641 | weight = NICE_0_LOAD; |
1640 | 1642 | ||
1641 | rq_weight += weight; | 1643 | sum_weight += weight; |
1642 | shares += tg->cfs_rq[i]->shares; | 1644 | shares += tg->cfs_rq[i]->shares; |
1643 | } | 1645 | } |
1644 | 1646 | ||
1647 | if (!rq_weight) | ||
1648 | rq_weight = sum_weight; | ||
1649 | |||
1645 | if ((!shares && rq_weight) || shares > tg->shares) | 1650 | if ((!shares && rq_weight) || shares > tg->shares) |
1646 | shares = tg->shares; | 1651 | shares = tg->shares; |
1647 | 1652 | ||
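The tg_shares_up() change is a semantic fix, not just churn: rq_weight (the shares denominator) now sums only real per-cpu queue weights, while the new sum_weight also carries the pretend NICE_0_LOAD added for idle cpus and is used only as a fallback when nothing is runnable anywhere. A standalone sketch of the arithmetic, with invented weights (NICE_0_LOAD is 1024):

    #include <stdio.h>

    #define NICE_0_LOAD 1024

    int main(void)
    {
            /* one busy cpu, three idle ones -- invented numbers */
            unsigned long w[4] = { 2048, 0, 0, 0 };
            unsigned long rq_weight = 0, sum_weight = 0;
            int i;

            for (i = 0; i < 4; i++) {
                    unsigned long weight = w[i];

                    rq_weight += weight;            /* real weight only */
                    if (!weight)
                            weight = NICE_0_LOAD;   /* pretend load when idle */
                    sum_weight += weight;
            }
            if (!rq_weight)
                    rq_weight = sum_weight;         /* fully idle fallback */

            /* old denominator: 5120; new: 2048, so the busy cpu now
             * receives the group's full share instead of 40% of it */
            printf("rq_weight=%lu sum_weight=%lu\n", rq_weight, sum_weight);
            return 0;
    }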
@@ -1701,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd) | |||
1701 | if (root_task_group_empty()) | 1706 | if (root_task_group_empty()) |
1702 | return; | 1707 | return; |
1703 | 1708 | ||
1704 | spin_unlock(&rq->lock); | 1709 | raw_spin_unlock(&rq->lock); |
1705 | update_shares(sd); | 1710 | update_shares(sd); |
1706 | spin_lock(&rq->lock); | 1711 | raw_spin_lock(&rq->lock); |
1707 | } | 1712 | } |
1708 | 1713 | ||
1709 | static void update_h_load(long cpu) | 1714 | static void update_h_load(long cpu) |
@@ -1743,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
1743 | __acquires(busiest->lock) | 1748 | __acquires(busiest->lock) |
1744 | __acquires(this_rq->lock) | 1749 | __acquires(this_rq->lock) |
1745 | { | 1750 | { |
1746 | spin_unlock(&this_rq->lock); | 1751 | raw_spin_unlock(&this_rq->lock); |
1747 | double_rq_lock(this_rq, busiest); | 1752 | double_rq_lock(this_rq, busiest); |
1748 | 1753 | ||
1749 | return 1; | 1754 | return 1; |
@@ -1764,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
1764 | { | 1769 | { |
1765 | int ret = 0; | 1770 | int ret = 0; |
1766 | 1771 | ||
1767 | if (unlikely(!spin_trylock(&busiest->lock))) { | 1772 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
1768 | if (busiest < this_rq) { | 1773 | if (busiest < this_rq) { |
1769 | spin_unlock(&this_rq->lock); | 1774 | raw_spin_unlock(&this_rq->lock); |
1770 | spin_lock(&busiest->lock); | 1775 | raw_spin_lock(&busiest->lock); |
1771 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | 1776 | raw_spin_lock_nested(&this_rq->lock, |
1777 | SINGLE_DEPTH_NESTING); | ||
1772 | ret = 1; | 1778 | ret = 1; |
1773 | } else | 1779 | } else |
1774 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | 1780 | raw_spin_lock_nested(&busiest->lock, |
1781 | SINGLE_DEPTH_NESTING); | ||
1775 | } | 1782 | } |
1776 | return ret; | 1783 | return ret; |
1777 | } | 1784 | } |
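_double_lock_balance() encodes the usual ABBA-avoidance rule: when the opportunistic trylock fails, both rq locks are taken in ascending address order, and the function returns 1 if it had to drop this_rq->lock on the way, so callers know to revalidate. The SINGLE_DEPTH_NESTING annotation only tells lockdep the second acquisition is intentional. A standalone pthreads sketch of the ordering rule, minus the lockdep part:

    #include <pthread.h>

    /* Lock two queues in a globally consistent (address) order so two
     * threads doing this concurrently can never deadlock. */
    static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (a == b) {
                    pthread_mutex_lock(a);          /* same queue: once */
            } else if (a < b) {
                    pthread_mutex_lock(a);
                    pthread_mutex_lock(b);
            } else {
                    pthread_mutex_lock(b);
                    pthread_mutex_lock(a);
            }
    }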
@@ -1785,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
1785 | { | 1792 | { |
1786 | if (unlikely(!irqs_disabled())) { | 1793 | if (unlikely(!irqs_disabled())) { |
1787 | /* printk() doesn't work good under rq->lock */ | 1794 | /* printk() doesn't work good under rq->lock */ |
1788 | spin_unlock(&this_rq->lock); | 1795 | raw_spin_unlock(&this_rq->lock); |
1789 | BUG_ON(1); | 1796 | BUG_ON(1); |
1790 | } | 1797 | } |
1791 | 1798 | ||
@@ -1795,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
1795 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | 1802 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
1796 | __releases(busiest->lock) | 1803 | __releases(busiest->lock) |
1797 | { | 1804 | { |
1798 | spin_unlock(&busiest->lock); | 1805 | raw_spin_unlock(&busiest->lock); |
1799 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | 1806 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
1800 | } | 1807 | } |
1801 | #endif | 1808 | #endif |
@@ -1810,6 +1817,22 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | |||
1810 | #endif | 1817 | #endif |
1811 | 1818 | ||
1812 | static void calc_load_account_active(struct rq *this_rq); | 1819 | static void calc_load_account_active(struct rq *this_rq); |
1820 | static void update_sysctl(void); | ||
1821 | static int get_update_sysctl_factor(void); | ||
1822 | |||
1823 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | ||
1824 | { | ||
1825 | set_task_rq(p, cpu); | ||
1826 | #ifdef CONFIG_SMP | ||
1827 | /* | ||
1828 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be | ||
1829 | * successfuly executed on another CPU. We must ensure that updates of | ||
1830 | * per-task data have been completed by this moment. | ||
1831 | */ | ||
1832 | smp_wmb(); | ||
1833 | task_thread_info(p)->cpu = cpu; | ||
1834 | #endif | ||
1835 | } | ||
1813 | 1836 | ||
1814 | #include "sched_stats.h" | 1837 | #include "sched_stats.h" |
1815 | #include "sched_idletask.c" | 1838 | #include "sched_idletask.c" |
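__set_task_cpu() moves up unchanged, presumably so the sched-class implementations included just below can reach it. Its smp_wmb() is the interesting part: every per-task store issued before it must be visible before the ->cpu store, because a remote CPU that observes the new value through task_rq_lock() may act on the task immediately. Schematic restatement of the publish side:

    /* Publish order in __set_task_cpu(): per-task state first, then
     * the ->cpu store that makes the task visible on the new cpu. */
    set_task_rq(p, cpu);                    /* per-task updates ...   */
    smp_wmb();                              /* ... must land first    */
    task_thread_info(p)->cpu = cpu;         /* publish the new cpu    */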
@@ -1967,20 +1990,6 @@ inline int task_curr(const struct task_struct *p) | |||
1967 | return cpu_curr(task_cpu(p)) == p; | 1990 | return cpu_curr(task_cpu(p)) == p; |
1968 | } | 1991 | } |
1969 | 1992 | ||
1970 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | ||
1971 | { | ||
1972 | set_task_rq(p, cpu); | ||
1973 | #ifdef CONFIG_SMP | ||
1974 | /* | ||
1975 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be | ||
1976 | * successfuly executed on another CPU. We must ensure that updates of | ||
1977 | * per-task data have been completed by this moment. | ||
1978 | */ | ||
1979 | smp_wmb(); | ||
1980 | task_thread_info(p)->cpu = cpu; | ||
1981 | #endif | ||
1982 | } | ||
1983 | |||
1984 | static inline void check_class_changed(struct rq *rq, struct task_struct *p, | 1993 | static inline void check_class_changed(struct rq *rq, struct task_struct *p, |
1985 | const struct sched_class *prev_class, | 1994 | const struct sched_class *prev_class, |
1986 | int oldprio, int running) | 1995 | int oldprio, int running) |
@@ -2016,13 +2025,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu) | |||
2016 | return; | 2025 | return; |
2017 | } | 2026 | } |
2018 | 2027 | ||
2019 | spin_lock_irqsave(&rq->lock, flags); | 2028 | raw_spin_lock_irqsave(&rq->lock, flags); |
2020 | update_rq_clock(rq); | 2029 | update_rq_clock(rq); |
2021 | set_task_cpu(p, cpu); | 2030 | set_task_cpu(p, cpu); |
2022 | p->cpus_allowed = cpumask_of_cpu(cpu); | 2031 | p->cpus_allowed = cpumask_of_cpu(cpu); |
2023 | p->rt.nr_cpus_allowed = 1; | 2032 | p->rt.nr_cpus_allowed = 1; |
2024 | p->flags |= PF_THREAD_BOUND; | 2033 | p->flags |= PF_THREAD_BOUND; |
2025 | spin_unlock_irqrestore(&rq->lock, flags); | 2034 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
2026 | } | 2035 | } |
2027 | EXPORT_SYMBOL(kthread_bind); | 2036 | EXPORT_SYMBOL(kthread_bind); |
2028 | 2037 | ||
@@ -2060,29 +2069,13 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | |||
2060 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | 2069 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
2061 | { | 2070 | { |
2062 | int old_cpu = task_cpu(p); | 2071 | int old_cpu = task_cpu(p); |
2063 | struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu); | ||
2064 | struct cfs_rq *old_cfsrq = task_cfs_rq(p), | 2072 | struct cfs_rq *old_cfsrq = task_cfs_rq(p), |
2065 | *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); | 2073 | *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); |
2066 | u64 clock_offset; | ||
2067 | |||
2068 | clock_offset = old_rq->clock - new_rq->clock; | ||
2069 | 2074 | ||
2070 | trace_sched_migrate_task(p, new_cpu); | 2075 | trace_sched_migrate_task(p, new_cpu); |
2071 | 2076 | ||
2072 | #ifdef CONFIG_SCHEDSTATS | ||
2073 | if (p->se.wait_start) | ||
2074 | p->se.wait_start -= clock_offset; | ||
2075 | if (p->se.sleep_start) | ||
2076 | p->se.sleep_start -= clock_offset; | ||
2077 | if (p->se.block_start) | ||
2078 | p->se.block_start -= clock_offset; | ||
2079 | #endif | ||
2080 | if (old_cpu != new_cpu) { | 2077 | if (old_cpu != new_cpu) { |
2081 | p->se.nr_migrations++; | 2078 | p->se.nr_migrations++; |
2082 | #ifdef CONFIG_SCHEDSTATS | ||
2083 | if (task_hot(p, old_rq->clock, NULL)) | ||
2084 | schedstat_inc(p, se.nr_forced2_migrations); | ||
2085 | #endif | ||
2086 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, | 2079 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, |
2087 | 1, 1, NULL, 0); | 2080 | 1, 1, NULL, 0); |
2088 | } | 2081 | } |
@@ -2323,6 +2316,14 @@ void task_oncpu_function_call(struct task_struct *p, | |||
2323 | preempt_enable(); | 2316 | preempt_enable(); |
2324 | } | 2317 | } |
2325 | 2318 | ||
2319 | #ifdef CONFIG_SMP | ||
2320 | static inline | ||
2321 | int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) | ||
2322 | { | ||
2323 | return p->sched_class->select_task_rq(p, sd_flags, wake_flags); | ||
2324 | } | ||
2325 | #endif | ||
2326 | |||
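The new select_task_rq() helper wraps the indirect call through the task's scheduling class, so the wake-up, fork, and exec call sites below all shrink to one call. An abridged sketch of the hook it dispatches to; the real struct carries many more methods:

    struct sched_class {
            /* ... other hooks elided ... */
            int (*select_task_rq)(struct task_struct *p, int sd_flag,
                                  int flags);
    };

    static inline int select_task_rq(struct task_struct *p, int sd_flags,
                                     int wake_flags)
    {
            return p->sched_class->select_task_rq(p, sd_flags, wake_flags);
    }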
2326 | /*** | 2327 | /*** |
2327 | * try_to_wake_up - wake up a thread | 2328 | * try_to_wake_up - wake up a thread |
2328 | * @p: the to-be-woken-up thread | 2329 | * @p: the to-be-woken-up thread |
@@ -2374,17 +2375,14 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2374 | if (task_contributes_to_load(p)) | 2375 | if (task_contributes_to_load(p)) |
2375 | rq->nr_uninterruptible--; | 2376 | rq->nr_uninterruptible--; |
2376 | p->state = TASK_WAKING; | 2377 | p->state = TASK_WAKING; |
2377 | task_rq_unlock(rq, &flags); | 2378 | __task_rq_unlock(rq); |
2378 | 2379 | ||
2379 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags); | 2380 | cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); |
2380 | if (cpu != orig_cpu) { | 2381 | if (cpu != orig_cpu) |
2381 | local_irq_save(flags); | ||
2382 | rq = cpu_rq(cpu); | ||
2383 | update_rq_clock(rq); | ||
2384 | set_task_cpu(p, cpu); | 2382 | set_task_cpu(p, cpu); |
2385 | local_irq_restore(flags); | 2383 | |
2386 | } | 2384 | rq = __task_rq_lock(p); |
2387 | rq = task_rq_lock(p, &flags); | 2385 | update_rq_clock(rq); |
2388 | 2386 | ||
2389 | WARN_ON(p->state != TASK_WAKING); | 2387 | WARN_ON(p->state != TASK_WAKING); |
2390 | cpu = task_cpu(p); | 2388 | cpu = task_cpu(p); |
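The reworked wake-up path is the payoff: CPU selection now runs without any rq lock held. Marking the task TASK_WAKING first keeps other paths from migrating it meanwhile, and __task_rq_unlock() leaves interrupts disabled, which is why the old local_irq_save() dance around set_task_cpu() can go. An annotated restatement of the new sequence:

    p->state = TASK_WAKING;         /* fences off concurrent migration */
    __task_rq_unlock(rq);           /* drop rq lock; irqs stay off */

    cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);  /* unlocked */
    if (cpu != orig_cpu)
            set_task_cpu(p, cpu);   /* safe while TASK_WAKING */

    rq = __task_rq_lock(p);         /* relock whichever rq p is on now */
    update_rq_clock(rq);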
@@ -2499,7 +2497,6 @@ static void __sched_fork(struct task_struct *p) | |||
2499 | p->se.avg_overlap = 0; | 2497 | p->se.avg_overlap = 0; |
2500 | p->se.start_runtime = 0; | 2498 | p->se.start_runtime = 0; |
2501 | p->se.avg_wakeup = sysctl_sched_wakeup_granularity; | 2499 | p->se.avg_wakeup = sysctl_sched_wakeup_granularity; |
2502 | p->se.avg_running = 0; | ||
2503 | 2500 | ||
2504 | #ifdef CONFIG_SCHEDSTATS | 2501 | #ifdef CONFIG_SCHEDSTATS |
2505 | p->se.wait_start = 0; | 2502 | p->se.wait_start = 0; |
@@ -2521,7 +2518,6 @@ static void __sched_fork(struct task_struct *p) | |||
2521 | p->se.nr_failed_migrations_running = 0; | 2518 | p->se.nr_failed_migrations_running = 0; |
2522 | p->se.nr_failed_migrations_hot = 0; | 2519 | p->se.nr_failed_migrations_hot = 0; |
2523 | p->se.nr_forced_migrations = 0; | 2520 | p->se.nr_forced_migrations = 0; |
2524 | p->se.nr_forced2_migrations = 0; | ||
2525 | 2521 | ||
2526 | p->se.nr_wakeups = 0; | 2522 | p->se.nr_wakeups = 0; |
2527 | p->se.nr_wakeups_sync = 0; | 2523 | p->se.nr_wakeups_sync = 0; |
@@ -2558,7 +2554,6 @@ static void __sched_fork(struct task_struct *p) | |||
2558 | void sched_fork(struct task_struct *p, int clone_flags) | 2554 | void sched_fork(struct task_struct *p, int clone_flags) |
2559 | { | 2555 | { |
2560 | int cpu = get_cpu(); | 2556 | int cpu = get_cpu(); |
2561 | unsigned long flags; | ||
2562 | 2557 | ||
2563 | __sched_fork(p); | 2558 | __sched_fork(p); |
2564 | 2559 | ||
@@ -2592,13 +2587,13 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2592 | if (!rt_prio(p->prio)) | 2587 | if (!rt_prio(p->prio)) |
2593 | p->sched_class = &fair_sched_class; | 2588 | p->sched_class = &fair_sched_class; |
2594 | 2589 | ||
2590 | if (p->sched_class->task_fork) | ||
2591 | p->sched_class->task_fork(p); | ||
2592 | |||
2595 | #ifdef CONFIG_SMP | 2593 | #ifdef CONFIG_SMP |
2596 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0); | 2594 | cpu = select_task_rq(p, SD_BALANCE_FORK, 0); |
2597 | #endif | 2595 | #endif |
2598 | local_irq_save(flags); | ||
2599 | update_rq_clock(cpu_rq(cpu)); | ||
2600 | set_task_cpu(p, cpu); | 2596 | set_task_cpu(p, cpu); |
2601 | local_irq_restore(flags); | ||
2602 | 2597 | ||
2603 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 2598 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
2604 | if (likely(sched_info_on())) | 2599 | if (likely(sched_info_on())) |
@@ -2631,17 +2626,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2631 | rq = task_rq_lock(p, &flags); | 2626 | rq = task_rq_lock(p, &flags); |
2632 | BUG_ON(p->state != TASK_RUNNING); | 2627 | BUG_ON(p->state != TASK_RUNNING); |
2633 | update_rq_clock(rq); | 2628 | update_rq_clock(rq); |
2634 | 2629 | activate_task(rq, p, 0); | |
2635 | if (!p->sched_class->task_new || !current->se.on_rq) { | ||
2636 | activate_task(rq, p, 0); | ||
2637 | } else { | ||
2638 | /* | ||
2639 | * Let the scheduling class do new task startup | ||
2640 | * management (if any): | ||
2641 | */ | ||
2642 | p->sched_class->task_new(rq, p); | ||
2643 | inc_nr_running(rq); | ||
2644 | } | ||
2645 | trace_sched_wakeup_new(rq, p, 1); | 2630 | trace_sched_wakeup_new(rq, p, 1); |
2646 | check_preempt_curr(rq, p, WF_FORK); | 2631 | check_preempt_curr(rq, p, WF_FORK); |
2647 | #ifdef CONFIG_SMP | 2632 | #ifdef CONFIG_SMP |
@@ -2798,10 +2783,10 @@ static inline void post_schedule(struct rq *rq) | |||
2798 | if (rq->post_schedule) { | 2783 | if (rq->post_schedule) { |
2799 | unsigned long flags; | 2784 | unsigned long flags; |
2800 | 2785 | ||
2801 | spin_lock_irqsave(&rq->lock, flags); | 2786 | raw_spin_lock_irqsave(&rq->lock, flags); |
2802 | if (rq->curr->sched_class->post_schedule) | 2787 | if (rq->curr->sched_class->post_schedule) |
2803 | rq->curr->sched_class->post_schedule(rq); | 2788 | rq->curr->sched_class->post_schedule(rq); |
2804 | spin_unlock_irqrestore(&rq->lock, flags); | 2789 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
2805 | 2790 | ||
2806 | rq->post_schedule = 0; | 2791 | rq->post_schedule = 0; |
2807 | } | 2792 | } |
@@ -3083,15 +3068,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2) | |||
3083 | { | 3068 | { |
3084 | BUG_ON(!irqs_disabled()); | 3069 | BUG_ON(!irqs_disabled()); |
3085 | if (rq1 == rq2) { | 3070 | if (rq1 == rq2) { |
3086 | spin_lock(&rq1->lock); | 3071 | raw_spin_lock(&rq1->lock); |
3087 | __acquire(rq2->lock); /* Fake it out ;) */ | 3072 | __acquire(rq2->lock); /* Fake it out ;) */ |
3088 | } else { | 3073 | } else { |
3089 | if (rq1 < rq2) { | 3074 | if (rq1 < rq2) { |
3090 | spin_lock(&rq1->lock); | 3075 | raw_spin_lock(&rq1->lock); |
3091 | spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); | 3076 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
3092 | } else { | 3077 | } else { |
3093 | spin_lock(&rq2->lock); | 3078 | raw_spin_lock(&rq2->lock); |
3094 | spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); | 3079 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
3095 | } | 3080 | } |
3096 | } | 3081 | } |
3097 | update_rq_clock(rq1); | 3082 | update_rq_clock(rq1); |
@@ -3108,9 +3093,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) | |||
3108 | __releases(rq1->lock) | 3093 | __releases(rq1->lock) |
3109 | __releases(rq2->lock) | 3094 | __releases(rq2->lock) |
3110 | { | 3095 | { |
3111 | spin_unlock(&rq1->lock); | 3096 | raw_spin_unlock(&rq1->lock); |
3112 | if (rq1 != rq2) | 3097 | if (rq1 != rq2) |
3113 | spin_unlock(&rq2->lock); | 3098 | raw_spin_unlock(&rq2->lock); |
3114 | else | 3099 | else |
3115 | __release(rq2->lock); | 3100 | __release(rq2->lock); |
3116 | } | 3101 | } |
@@ -3156,7 +3141,7 @@ out: | |||
3156 | void sched_exec(void) | 3141 | void sched_exec(void) |
3157 | { | 3142 | { |
3158 | int new_cpu, this_cpu = get_cpu(); | 3143 | int new_cpu, this_cpu = get_cpu(); |
3159 | new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0); | 3144 | new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0); |
3160 | put_cpu(); | 3145 | put_cpu(); |
3161 | if (new_cpu != this_cpu) | 3146 | if (new_cpu != this_cpu) |
3162 | sched_migrate_task(current, new_cpu); | 3147 | sched_migrate_task(current, new_cpu); |
@@ -3172,10 +3157,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, | |||
3172 | deactivate_task(src_rq, p, 0); | 3157 | deactivate_task(src_rq, p, 0); |
3173 | set_task_cpu(p, this_cpu); | 3158 | set_task_cpu(p, this_cpu); |
3174 | activate_task(this_rq, p, 0); | 3159 | activate_task(this_rq, p, 0); |
3175 | /* | ||
3176 | * Note that idle threads have a prio of MAX_PRIO, for this test | ||
3177 | * to be always true for them. | ||
3178 | */ | ||
3179 | check_preempt_curr(this_rq, p, 0); | 3160 | check_preempt_curr(this_rq, p, 0); |
3180 | } | 3161 | } |
3181 | 3162 | ||
@@ -4134,7 +4115,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
4134 | unsigned long flags; | 4115 | unsigned long flags; |
4135 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | 4116 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); |
4136 | 4117 | ||
4137 | cpumask_copy(cpus, cpu_online_mask); | 4118 | cpumask_copy(cpus, cpu_active_mask); |
4138 | 4119 | ||
4139 | /* | 4120 | /* |
4140 | * When power savings policy is enabled for the parent domain, idle | 4121 | * When power savings policy is enabled for the parent domain, idle |
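From here on the balancer masks switch from cpu_online_mask to cpu_active_mask (and num_online_cpus() to num_active_cpus() in the nohz code further down). Active is the subset of online CPUs still accepting tasks; a CPU being unplugged leaves it early in the hotplug sequence, so load stops flowing toward it sooner. Kernel-context sketch of the intended test:

    #include <linux/types.h>
    #include <linux/cpumask.h>

    /* Every active cpu is online; the reverse does not hold during
     * hotplug, which is exactly when the distinction matters. */
    static bool may_balance_onto(int cpu)
    {
            return cpu_active(cpu);
    }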
@@ -4207,14 +4188,15 @@ redo: | |||
4207 | 4188 | ||
4208 | if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { | 4189 | if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { |
4209 | 4190 | ||
4210 | spin_lock_irqsave(&busiest->lock, flags); | 4191 | raw_spin_lock_irqsave(&busiest->lock, flags); |
4211 | 4192 | ||
4212 | /* don't kick the migration_thread, if the curr | 4193 | /* don't kick the migration_thread, if the curr |
4213 | * task on busiest cpu can't be moved to this_cpu | 4194 | * task on busiest cpu can't be moved to this_cpu |
4214 | */ | 4195 | */ |
4215 | if (!cpumask_test_cpu(this_cpu, | 4196 | if (!cpumask_test_cpu(this_cpu, |
4216 | &busiest->curr->cpus_allowed)) { | 4197 | &busiest->curr->cpus_allowed)) { |
4217 | spin_unlock_irqrestore(&busiest->lock, flags); | 4198 | raw_spin_unlock_irqrestore(&busiest->lock, |
4199 | flags); | ||
4218 | all_pinned = 1; | 4200 | all_pinned = 1; |
4219 | goto out_one_pinned; | 4201 | goto out_one_pinned; |
4220 | } | 4202 | } |
@@ -4224,7 +4206,7 @@ redo: | |||
4224 | busiest->push_cpu = this_cpu; | 4206 | busiest->push_cpu = this_cpu; |
4225 | active_balance = 1; | 4207 | active_balance = 1; |
4226 | } | 4208 | } |
4227 | spin_unlock_irqrestore(&busiest->lock, flags); | 4209 | raw_spin_unlock_irqrestore(&busiest->lock, flags); |
4228 | if (active_balance) | 4210 | if (active_balance) |
4229 | wake_up_process(busiest->migration_thread); | 4211 | wake_up_process(busiest->migration_thread); |
4230 | 4212 | ||
@@ -4297,7 +4279,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) | |||
4297 | int all_pinned = 0; | 4279 | int all_pinned = 0; |
4298 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | 4280 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); |
4299 | 4281 | ||
4300 | cpumask_copy(cpus, cpu_online_mask); | 4282 | cpumask_copy(cpus, cpu_active_mask); |
4301 | 4283 | ||
4302 | /* | 4284 | /* |
4303 | * When power savings policy is enabled for the parent domain, idle | 4285 | * When power savings policy is enabled for the parent domain, idle |
@@ -4406,10 +4388,10 @@ redo: | |||
4406 | /* | 4388 | /* |
4407 | * Should not call ttwu while holding a rq->lock | 4389 | * Should not call ttwu while holding a rq->lock |
4408 | */ | 4390 | */ |
4409 | spin_unlock(&this_rq->lock); | 4391 | raw_spin_unlock(&this_rq->lock); |
4410 | if (active_balance) | 4392 | if (active_balance) |
4411 | wake_up_process(busiest->migration_thread); | 4393 | wake_up_process(busiest->migration_thread); |
4412 | spin_lock(&this_rq->lock); | 4394 | raw_spin_lock(&this_rq->lock); |
4413 | 4395 | ||
4414 | } else | 4396 | } else |
4415 | sd->nr_balance_failed = 0; | 4397 | sd->nr_balance_failed = 0; |
@@ -4694,7 +4676,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
4694 | cpumask_set_cpu(cpu, nohz.cpu_mask); | 4676 | cpumask_set_cpu(cpu, nohz.cpu_mask); |
4695 | 4677 | ||
4696 | /* time for ilb owner also to sleep */ | 4678 | /* time for ilb owner also to sleep */ |
4697 | if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { | 4679 | if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) { |
4698 | if (atomic_read(&nohz.load_balancer) == cpu) | 4680 | if (atomic_read(&nohz.load_balancer) == cpu) |
4699 | atomic_set(&nohz.load_balancer, -1); | 4681 | atomic_set(&nohz.load_balancer, -1); |
4700 | return 0; | 4682 | return 0; |
@@ -5278,11 +5260,11 @@ void scheduler_tick(void) | |||
5278 | 5260 | ||
5279 | sched_clock_tick(); | 5261 | sched_clock_tick(); |
5280 | 5262 | ||
5281 | spin_lock(&rq->lock); | 5263 | raw_spin_lock(&rq->lock); |
5282 | update_rq_clock(rq); | 5264 | update_rq_clock(rq); |
5283 | update_cpu_load(rq); | 5265 | update_cpu_load(rq); |
5284 | curr->sched_class->task_tick(rq, curr, 0); | 5266 | curr->sched_class->task_tick(rq, curr, 0); |
5285 | spin_unlock(&rq->lock); | 5267 | raw_spin_unlock(&rq->lock); |
5286 | 5268 | ||
5287 | perf_event_task_tick(curr, cpu); | 5269 | perf_event_task_tick(curr, cpu); |
5288 | 5270 | ||
@@ -5396,13 +5378,14 @@ static inline void schedule_debug(struct task_struct *prev) | |||
5396 | #endif | 5378 | #endif |
5397 | } | 5379 | } |
5398 | 5380 | ||
5399 | static void put_prev_task(struct rq *rq, struct task_struct *p) | 5381 | static void put_prev_task(struct rq *rq, struct task_struct *prev) |
5400 | { | 5382 | { |
5401 | u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime; | 5383 | if (prev->state == TASK_RUNNING) { |
5384 | u64 runtime = prev->se.sum_exec_runtime; | ||
5402 | 5385 | ||
5403 | update_avg(&p->se.avg_running, runtime); | 5386 | runtime -= prev->se.prev_sum_exec_runtime; |
5387 | runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); | ||
5404 | 5388 | ||
5405 | if (p->state == TASK_RUNNING) { | ||
5406 | /* | 5389 | /* |
5407 | * In order to avoid avg_overlap growing stale when we are | 5390 | * In order to avoid avg_overlap growing stale when we are |
5408 | * indeed overlapping and hence not getting put to sleep, grow | 5391 | * indeed overlapping and hence not getting put to sleep, grow |
@@ -5412,12 +5395,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p) | |||
5412 | * correlates to the amount of cache footprint a task can | 5395 | * correlates to the amount of cache footprint a task can |
5413 | * build up. | 5396 | * build up. |
5414 | */ | 5397 | */ |
5415 | runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); | 5398 | update_avg(&prev->se.avg_overlap, runtime); |
5416 | update_avg(&p->se.avg_overlap, runtime); | ||
5417 | } else { | ||
5418 | update_avg(&p->se.avg_running, 0); | ||
5419 | } | 5399 | } |
5420 | p->sched_class->put_prev_task(rq, p); | 5400 | prev->sched_class->put_prev_task(rq, prev); |
5421 | } | 5401 | } |
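put_prev_task() keeps only the avg_overlap update and folds the clamp in before it: the observed slice is capped at 2 * sysctl_sched_migration_cost so a single long run cannot swamp the average, and avg_running disappears entirely (its field removals appear in __sched_fork() earlier in this diff). Elsewhere in sched.c, update_avg() is a 1/8-weight moving average; a standalone sketch of the arithmetic, assuming the default migration cost of 500000 ns:

    #include <stdio.h>
    #include <stdint.h>

    static void update_avg(uint64_t *avg, uint64_t sample)
    {
            int64_t diff = (int64_t)(sample - *avg);

            *avg += diff >> 3;              /* add 1/8th of the error */
    }

    int main(void)
    {
            uint64_t migration_cost = 500000;       /* ns, default */
            uint64_t avg_overlap = 0;
            uint64_t runtime = 5000000;             /* one 5 ms slice */

            if (runtime > 2 * migration_cost)
                    runtime = 2 * migration_cost;   /* clamp to 1 ms */
            update_avg(&avg_overlap, runtime);

            printf("avg_overlap = %llu ns\n",
                   (unsigned long long)avg_overlap);        /* 125000 */
            return 0;
    }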
5422 | 5402 | ||
5423 | /* | 5403 | /* |
@@ -5478,7 +5458,7 @@ need_resched_nonpreemptible: | |||
5478 | if (sched_feat(HRTICK)) | 5458 | if (sched_feat(HRTICK)) |
5479 | hrtick_clear(rq); | 5459 | hrtick_clear(rq); |
5480 | 5460 | ||
5481 | spin_lock_irq(&rq->lock); | 5461 | raw_spin_lock_irq(&rq->lock); |
5482 | update_rq_clock(rq); | 5462 | update_rq_clock(rq); |
5483 | clear_tsk_need_resched(prev); | 5463 | clear_tsk_need_resched(prev); |
5484 | 5464 | ||
@@ -5514,7 +5494,7 @@ need_resched_nonpreemptible: | |||
5514 | cpu = smp_processor_id(); | 5494 | cpu = smp_processor_id(); |
5515 | rq = cpu_rq(cpu); | 5495 | rq = cpu_rq(cpu); |
5516 | } else | 5496 | } else |
5517 | spin_unlock_irq(&rq->lock); | 5497 | raw_spin_unlock_irq(&rq->lock); |
5518 | 5498 | ||
5519 | post_schedule(rq); | 5499 | post_schedule(rq); |
5520 | 5500 | ||
@@ -6343,7 +6323,7 @@ recheck: | |||
6343 | * make sure no PI-waiters arrive (or leave) while we are | 6323 | * make sure no PI-waiters arrive (or leave) while we are |
6344 | * changing the priority of the task: | 6324 | * changing the priority of the task: |
6345 | */ | 6325 | */ |
6346 | spin_lock_irqsave(&p->pi_lock, flags); | 6326 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
6347 | /* | 6327 | /* |
6348 | * To be able to change p->policy safely, the apropriate | 6328 | * To be able to change p->policy safely, the apropriate |
6349 | * runqueue lock must be held. | 6329 | * runqueue lock must be held. |
@@ -6353,7 +6333,7 @@ recheck: | |||
6353 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { | 6333 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
6354 | policy = oldpolicy = -1; | 6334 | policy = oldpolicy = -1; |
6355 | __task_rq_unlock(rq); | 6335 | __task_rq_unlock(rq); |
6356 | spin_unlock_irqrestore(&p->pi_lock, flags); | 6336 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
6357 | goto recheck; | 6337 | goto recheck; |
6358 | } | 6338 | } |
6359 | update_rq_clock(rq); | 6339 | update_rq_clock(rq); |
@@ -6377,7 +6357,7 @@ recheck: | |||
6377 | check_class_changed(rq, p, prev_class, oldprio, running); | 6357 | check_class_changed(rq, p, prev_class, oldprio, running); |
6378 | } | 6358 | } |
6379 | __task_rq_unlock(rq); | 6359 | __task_rq_unlock(rq); |
6380 | spin_unlock_irqrestore(&p->pi_lock, flags); | 6360 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
6381 | 6361 | ||
6382 | rt_mutex_adjust_pi(p); | 6362 | rt_mutex_adjust_pi(p); |
6383 | 6363 | ||
@@ -6631,6 +6611,8 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, | |||
6631 | long sched_getaffinity(pid_t pid, struct cpumask *mask) | 6611 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
6632 | { | 6612 | { |
6633 | struct task_struct *p; | 6613 | struct task_struct *p; |
6614 | unsigned long flags; | ||
6615 | struct rq *rq; | ||
6634 | int retval; | 6616 | int retval; |
6635 | 6617 | ||
6636 | get_online_cpus(); | 6618 | get_online_cpus(); |
@@ -6645,7 +6627,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) | |||
6645 | if (retval) | 6627 | if (retval) |
6646 | goto out_unlock; | 6628 | goto out_unlock; |
6647 | 6629 | ||
6630 | rq = task_rq_lock(p, &flags); | ||
6648 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); | 6631 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
6632 | task_rq_unlock(rq, &flags); | ||
6649 | 6633 | ||
6650 | out_unlock: | 6634 | out_unlock: |
6651 | read_unlock(&tasklist_lock); | 6635 | read_unlock(&tasklist_lock); |
@@ -6703,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield) | |||
6703 | */ | 6687 | */ |
6704 | __release(rq->lock); | 6688 | __release(rq->lock); |
6705 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); | 6689 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
6706 | _raw_spin_unlock(&rq->lock); | 6690 | do_raw_spin_unlock(&rq->lock); |
6707 | preempt_enable_no_resched(); | 6691 | preempt_enable_no_resched(); |
6708 | 6692 | ||
6709 | schedule(); | 6693 | schedule(); |
@@ -6883,6 +6867,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
6883 | { | 6867 | { |
6884 | struct task_struct *p; | 6868 | struct task_struct *p; |
6885 | unsigned int time_slice; | 6869 | unsigned int time_slice; |
6870 | unsigned long flags; | ||
6871 | struct rq *rq; | ||
6886 | int retval; | 6872 | int retval; |
6887 | struct timespec t; | 6873 | struct timespec t; |
6888 | 6874 | ||
@@ -6899,7 +6885,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
6899 | if (retval) | 6885 | if (retval) |
6900 | goto out_unlock; | 6886 | goto out_unlock; |
6901 | 6887 | ||
6902 | time_slice = p->sched_class->get_rr_interval(p); | 6888 | rq = task_rq_lock(p, &flags); |
6889 | time_slice = p->sched_class->get_rr_interval(rq, p); | ||
6890 | task_rq_unlock(rq, &flags); | ||
6903 | 6891 | ||
6904 | read_unlock(&tasklist_lock); | 6892 | read_unlock(&tasklist_lock); |
6905 | jiffies_to_timespec(time_slice, &t); | 6893 | jiffies_to_timespec(time_slice, &t); |
@@ -6995,12 +6983,11 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
6995 | struct rq *rq = cpu_rq(cpu); | 6983 | struct rq *rq = cpu_rq(cpu); |
6996 | unsigned long flags; | 6984 | unsigned long flags; |
6997 | 6985 | ||
6998 | spin_lock_irqsave(&rq->lock, flags); | 6986 | raw_spin_lock_irqsave(&rq->lock, flags); |
6999 | 6987 | ||
7000 | __sched_fork(idle); | 6988 | __sched_fork(idle); |
7001 | idle->se.exec_start = sched_clock(); | 6989 | idle->se.exec_start = sched_clock(); |
7002 | 6990 | ||
7003 | idle->prio = idle->normal_prio = MAX_PRIO; | ||
7004 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); | 6991 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
7005 | __set_task_cpu(idle, cpu); | 6992 | __set_task_cpu(idle, cpu); |
7006 | 6993 | ||
@@ -7008,7 +6995,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
7008 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 6995 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
7009 | idle->oncpu = 1; | 6996 | idle->oncpu = 1; |
7010 | #endif | 6997 | #endif |
7011 | spin_unlock_irqrestore(&rq->lock, flags); | 6998 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7012 | 6999 | ||
7013 | /* Set the preempt count _outside_ the spinlocks! */ | 7000 | /* Set the preempt count _outside_ the spinlocks! */ |
7014 | #if defined(CONFIG_PREEMPT) | 7001 | #if defined(CONFIG_PREEMPT) |
@@ -7041,22 +7028,43 @@ cpumask_var_t nohz_cpu_mask; | |||
7041 | * | 7028 | * |
7042 | * This idea comes from the SD scheduler of Con Kolivas: | 7029 | * This idea comes from the SD scheduler of Con Kolivas: |
7043 | */ | 7030 | */ |
7044 | static inline void sched_init_granularity(void) | 7031 | static int get_update_sysctl_factor(void) |
7045 | { | 7032 | { |
7046 | unsigned int factor = 1 + ilog2(num_online_cpus()); | 7033 | unsigned int cpus = min_t(int, num_online_cpus(), 8); |
7047 | const unsigned long limit = 200000000; | 7034 | unsigned int factor; |
7035 | |||
7036 | switch (sysctl_sched_tunable_scaling) { | ||
7037 | case SCHED_TUNABLESCALING_NONE: | ||
7038 | factor = 1; | ||
7039 | break; | ||
7040 | case SCHED_TUNABLESCALING_LINEAR: | ||
7041 | factor = cpus; | ||
7042 | break; | ||
7043 | case SCHED_TUNABLESCALING_LOG: | ||
7044 | default: | ||
7045 | factor = 1 + ilog2(cpus); | ||
7046 | break; | ||
7047 | } | ||
7048 | 7048 | ||
7049 | sysctl_sched_min_granularity *= factor; | 7049 | return factor; |
7050 | if (sysctl_sched_min_granularity > limit) | 7050 | } |
7051 | sysctl_sched_min_granularity = limit; | ||
7052 | 7051 | ||
7053 | sysctl_sched_latency *= factor; | 7052 | static void update_sysctl(void) |
7054 | if (sysctl_sched_latency > limit) | 7053 | { |
7055 | sysctl_sched_latency = limit; | 7054 | unsigned int factor = get_update_sysctl_factor(); |
7056 | 7055 | ||
7057 | sysctl_sched_wakeup_granularity *= factor; | 7056 | #define SET_SYSCTL(name) \ |
7057 | (sysctl_##name = (factor) * normalized_sysctl_##name) | ||
7058 | SET_SYSCTL(sched_min_granularity); | ||
7059 | SET_SYSCTL(sched_latency); | ||
7060 | SET_SYSCTL(sched_wakeup_granularity); | ||
7061 | SET_SYSCTL(sched_shares_ratelimit); | ||
7062 | #undef SET_SYSCTL | ||
7063 | } | ||
7058 | 7064 | ||
7059 | sysctl_sched_shares_ratelimit *= factor; | 7065 | static inline void sched_init_granularity(void) |
7066 | { | ||
7067 | update_sysctl(); | ||
7060 | } | 7068 | } |
7061 | 7069 | ||
7062 | #ifdef CONFIG_SMP | 7070 | #ifdef CONFIG_SMP |
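The granularity scaling is restructured so it can be redone at any time: each tunable keeps a normalized_sysctl_* base value (one was added near line 817 above), and update_sysctl() recomputes sysctl_* = factor * normalized_sysctl_*, with the factor policy selectable via sysctl_sched_tunable_scaling and the CPU count capped at 8. The old one-shot multiply-and-clamp could not be reapplied after CPU hotplug. A standalone sketch of the factor computation; the enum values are assumptions, while the cap and formulas mirror the hunk:

    #include <stdio.h>

    enum {
            SCHED_TUNABLESCALING_NONE,
            SCHED_TUNABLESCALING_LOG,       /* default */
            SCHED_TUNABLESCALING_LINEAR,
    };

    static int ilog2_u(unsigned int x)      /* floor(log2(x)), x >= 1 */
    {
            int l = 0;

            while (x >>= 1)
                    l++;
            return l;
    }

    static unsigned int factor(unsigned int online_cpus, int scaling)
    {
            unsigned int cpus = online_cpus < 8 ? online_cpus : 8;

            switch (scaling) {
            case SCHED_TUNABLESCALING_NONE:
                    return 1;
            case SCHED_TUNABLESCALING_LINEAR:
                    return cpus;
            default:                        /* LOG */
                    return 1 + ilog2_u(cpus);
            }
    }

    int main(void)
    {
            /* 16 cpus, LOG policy: capped to 8, factor = 1 + 3 = 4, so
             * sysctl_sched_latency becomes 4x its normalized base. */
            printf("factor = %u\n", factor(16, SCHED_TUNABLESCALING_LOG));
            return 0;
    }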
@@ -7093,7 +7101,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | |||
7093 | int ret = 0; | 7101 | int ret = 0; |
7094 | 7102 | ||
7095 | rq = task_rq_lock(p, &flags); | 7103 | rq = task_rq_lock(p, &flags); |
7096 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { | 7104 | if (!cpumask_intersects(new_mask, cpu_active_mask)) { |
7097 | ret = -EINVAL; | 7105 | ret = -EINVAL; |
7098 | goto out; | 7106 | goto out; |
7099 | } | 7107 | } |
@@ -7115,7 +7123,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | |||
7115 | if (cpumask_test_cpu(task_cpu(p), new_mask)) | 7123 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
7116 | goto out; | 7124 | goto out; |
7117 | 7125 | ||
7118 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { | 7126 | if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) { |
7119 | /* Need help from migration thread: drop lock and wait. */ | 7127 | /* Need help from migration thread: drop lock and wait. */ |
7120 | struct task_struct *mt = rq->migration_thread; | 7128 | struct task_struct *mt = rq->migration_thread; |
7121 | 7129 | ||
@@ -7204,10 +7212,10 @@ static int migration_thread(void *data) | |||
7204 | struct migration_req *req; | 7212 | struct migration_req *req; |
7205 | struct list_head *head; | 7213 | struct list_head *head; |
7206 | 7214 | ||
7207 | spin_lock_irq(&rq->lock); | 7215 | raw_spin_lock_irq(&rq->lock); |
7208 | 7216 | ||
7209 | if (cpu_is_offline(cpu)) { | 7217 | if (cpu_is_offline(cpu)) { |
7210 | spin_unlock_irq(&rq->lock); | 7218 | raw_spin_unlock_irq(&rq->lock); |
7211 | break; | 7219 | break; |
7212 | } | 7220 | } |
7213 | 7221 | ||
@@ -7219,7 +7227,7 @@ static int migration_thread(void *data) | |||
7219 | head = &rq->migration_queue; | 7227 | head = &rq->migration_queue; |
7220 | 7228 | ||
7221 | if (list_empty(head)) { | 7229 | if (list_empty(head)) { |
7222 | spin_unlock_irq(&rq->lock); | 7230 | raw_spin_unlock_irq(&rq->lock); |
7223 | schedule(); | 7231 | schedule(); |
7224 | set_current_state(TASK_INTERRUPTIBLE); | 7232 | set_current_state(TASK_INTERRUPTIBLE); |
7225 | continue; | 7233 | continue; |
@@ -7228,14 +7236,14 @@ static int migration_thread(void *data) | |||
7228 | list_del_init(head->next); | 7236 | list_del_init(head->next); |
7229 | 7237 | ||
7230 | if (req->task != NULL) { | 7238 | if (req->task != NULL) { |
7231 | spin_unlock(&rq->lock); | 7239 | raw_spin_unlock(&rq->lock); |
7232 | __migrate_task(req->task, cpu, req->dest_cpu); | 7240 | __migrate_task(req->task, cpu, req->dest_cpu); |
7233 | } else if (likely(cpu == (badcpu = smp_processor_id()))) { | 7241 | } else if (likely(cpu == (badcpu = smp_processor_id()))) { |
7234 | req->dest_cpu = RCU_MIGRATION_GOT_QS; | 7242 | req->dest_cpu = RCU_MIGRATION_GOT_QS; |
7235 | spin_unlock(&rq->lock); | 7243 | raw_spin_unlock(&rq->lock); |
7236 | } else { | 7244 | } else { |
7237 | req->dest_cpu = RCU_MIGRATION_MUST_SYNC; | 7245 | req->dest_cpu = RCU_MIGRATION_MUST_SYNC; |
7238 | spin_unlock(&rq->lock); | 7246 | raw_spin_unlock(&rq->lock); |
7239 | WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); | 7247 | WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); |
7240 | } | 7248 | } |
7241 | local_irq_enable(); | 7249 | local_irq_enable(); |
@@ -7269,19 +7277,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
7269 | 7277 | ||
7270 | again: | 7278 | again: |
7271 | /* Look for allowed, online CPU in same node. */ | 7279 | /* Look for allowed, online CPU in same node. */ |
7272 | for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) | 7280 | for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) |
7273 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) | 7281 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
7274 | goto move; | 7282 | goto move; |
7275 | 7283 | ||
7276 | /* Any allowed, online CPU? */ | 7284 | /* Any allowed, online CPU? */ |
7277 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); | 7285 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); |
7278 | if (dest_cpu < nr_cpu_ids) | 7286 | if (dest_cpu < nr_cpu_ids) |
7279 | goto move; | 7287 | goto move; |
7280 | 7288 | ||
7281 | /* No more Mr. Nice Guy. */ | 7289 | /* No more Mr. Nice Guy. */ |
7282 | if (dest_cpu >= nr_cpu_ids) { | 7290 | if (dest_cpu >= nr_cpu_ids) { |
7283 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); | 7291 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); |
7284 | dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); | 7292 | dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); |
7285 | 7293 | ||
7286 | /* | 7294 | /* |
7287 | * Don't tell them about moving exiting tasks or | 7295 | * Don't tell them about moving exiting tasks or |
@@ -7310,7 +7318,7 @@ move: | |||
7310 | */ | 7318 | */ |
7311 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 7319 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
7312 | { | 7320 | { |
7313 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); | 7321 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); |
7314 | unsigned long flags; | 7322 | unsigned long flags; |
7315 | 7323 | ||
7316 | local_irq_save(flags); | 7324 | local_irq_save(flags); |
@@ -7358,14 +7366,14 @@ void sched_idle_next(void) | |||
7358 | * Strictly not necessary since rest of the CPUs are stopped by now | 7366 | * Strictly not necessary since rest of the CPUs are stopped by now |
7359 | * and interrupts disabled on the current cpu. | 7367 | * and interrupts disabled on the current cpu. |
7360 | */ | 7368 | */ |
7361 | spin_lock_irqsave(&rq->lock, flags); | 7369 | raw_spin_lock_irqsave(&rq->lock, flags); |
7362 | 7370 | ||
7363 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); | 7371 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); |
7364 | 7372 | ||
7365 | update_rq_clock(rq); | 7373 | update_rq_clock(rq); |
7366 | activate_task(rq, p, 0); | 7374 | activate_task(rq, p, 0); |
7367 | 7375 | ||
7368 | spin_unlock_irqrestore(&rq->lock, flags); | 7376 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7369 | } | 7377 | } |
7370 | 7378 | ||
7371 | /* | 7379 | /* |
@@ -7401,9 +7409,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) | |||
7401 | * that's OK. No task can be added to this CPU, so iteration is | 7409 | * that's OK. No task can be added to this CPU, so iteration is |
7402 | * fine. | 7410 | * fine. |
7403 | */ | 7411 | */ |
7404 | spin_unlock_irq(&rq->lock); | 7412 | raw_spin_unlock_irq(&rq->lock); |
7405 | move_task_off_dead_cpu(dead_cpu, p); | 7413 | move_task_off_dead_cpu(dead_cpu, p); |
7406 | spin_lock_irq(&rq->lock); | 7414 | raw_spin_lock_irq(&rq->lock); |
7407 | 7415 | ||
7408 | put_task_struct(p); | 7416 | put_task_struct(p); |
7409 | } | 7417 | } |
@@ -7444,17 +7452,16 @@ static struct ctl_table sd_ctl_dir[] = { | |||
7444 | .procname = "sched_domain", | 7452 | .procname = "sched_domain", |
7445 | .mode = 0555, | 7453 | .mode = 0555, |
7446 | }, | 7454 | }, |
7447 | {0, }, | 7455 | {} |
7448 | }; | 7456 | }; |
7449 | 7457 | ||
7450 | static struct ctl_table sd_ctl_root[] = { | 7458 | static struct ctl_table sd_ctl_root[] = { |
7451 | { | 7459 | { |
7452 | .ctl_name = CTL_KERN, | ||
7453 | .procname = "kernel", | 7460 | .procname = "kernel", |
7454 | .mode = 0555, | 7461 | .mode = 0555, |
7455 | .child = sd_ctl_dir, | 7462 | .child = sd_ctl_dir, |
7456 | }, | 7463 | }, |
7457 | {0, }, | 7464 | {} |
7458 | }; | 7465 | }; |
7459 | 7466 | ||
7460 | static struct ctl_table *sd_alloc_ctl_entry(int n) | 7467 | static struct ctl_table *sd_alloc_ctl_entry(int n) |
@@ -7564,7 +7571,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu) | |||
7564 | static struct ctl_table_header *sd_sysctl_header; | 7571 | static struct ctl_table_header *sd_sysctl_header; |
7565 | static void register_sched_domain_sysctl(void) | 7572 | static void register_sched_domain_sysctl(void) |
7566 | { | 7573 | { |
7567 | int i, cpu_num = num_online_cpus(); | 7574 | int i, cpu_num = num_possible_cpus(); |
7568 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); | 7575 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); |
7569 | char buf[32]; | 7576 | char buf[32]; |
7570 | 7577 | ||
@@ -7574,7 +7581,7 @@ static void register_sched_domain_sysctl(void) | |||
7574 | if (entry == NULL) | 7581 | if (entry == NULL) |
7575 | return; | 7582 | return; |
7576 | 7583 | ||
7577 | for_each_online_cpu(i) { | 7584 | for_each_possible_cpu(i) { |
7578 | snprintf(buf, 32, "cpu%d", i); | 7585 | snprintf(buf, 32, "cpu%d", i); |
7579 | entry->procname = kstrdup(buf, GFP_KERNEL); | 7586 | entry->procname = kstrdup(buf, GFP_KERNEL); |
7580 | entry->mode = 0555; | 7587 | entry->mode = 0555; |
@@ -7670,13 +7677,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7670 | 7677 | ||
7671 | /* Update our root-domain */ | 7678 | /* Update our root-domain */ |
7672 | rq = cpu_rq(cpu); | 7679 | rq = cpu_rq(cpu); |
7673 | spin_lock_irqsave(&rq->lock, flags); | 7680 | raw_spin_lock_irqsave(&rq->lock, flags); |
7674 | if (rq->rd) { | 7681 | if (rq->rd) { |
7675 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | 7682 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
7676 | 7683 | ||
7677 | set_rq_online(rq); | 7684 | set_rq_online(rq); |
7678 | } | 7685 | } |
7679 | spin_unlock_irqrestore(&rq->lock, flags); | 7686 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7680 | break; | 7687 | break; |
7681 | 7688 | ||
7682 | #ifdef CONFIG_HOTPLUG_CPU | 7689 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -7701,14 +7708,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7701 | put_task_struct(rq->migration_thread); | 7708 | put_task_struct(rq->migration_thread); |
7702 | rq->migration_thread = NULL; | 7709 | rq->migration_thread = NULL; |
7703 | /* Idle task back to normal (off runqueue, low prio) */ | 7710 | /* Idle task back to normal (off runqueue, low prio) */ |
7704 | spin_lock_irq(&rq->lock); | 7711 | raw_spin_lock_irq(&rq->lock); |
7705 | update_rq_clock(rq); | 7712 | update_rq_clock(rq); |
7706 | deactivate_task(rq, rq->idle, 0); | 7713 | deactivate_task(rq, rq->idle, 0); |
7707 | rq->idle->static_prio = MAX_PRIO; | ||
7708 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); | 7714 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); |
7709 | rq->idle->sched_class = &idle_sched_class; | 7715 | rq->idle->sched_class = &idle_sched_class; |
7710 | migrate_dead_tasks(cpu); | 7716 | migrate_dead_tasks(cpu); |
7711 | spin_unlock_irq(&rq->lock); | 7717 | raw_spin_unlock_irq(&rq->lock); |
7712 | cpuset_unlock(); | 7718 | cpuset_unlock(); |
7713 | migrate_nr_uninterruptible(rq); | 7719 | migrate_nr_uninterruptible(rq); |
7714 | BUG_ON(rq->nr_running != 0); | 7720 | BUG_ON(rq->nr_running != 0); |
@@ -7718,30 +7724,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7718 | * they didn't take sched_hotcpu_mutex. Just wake up | 7724 | * they didn't take sched_hotcpu_mutex. Just wake up |
7719 | * the requestors. | 7725 | * the requestors. |
7720 | */ | 7726 | */ |
7721 | spin_lock_irq(&rq->lock); | 7727 | raw_spin_lock_irq(&rq->lock); |
7722 | while (!list_empty(&rq->migration_queue)) { | 7728 | while (!list_empty(&rq->migration_queue)) { |
7723 | struct migration_req *req; | 7729 | struct migration_req *req; |
7724 | 7730 | ||
7725 | req = list_entry(rq->migration_queue.next, | 7731 | req = list_entry(rq->migration_queue.next, |
7726 | struct migration_req, list); | 7732 | struct migration_req, list); |
7727 | list_del_init(&req->list); | 7733 | list_del_init(&req->list); |
7728 | spin_unlock_irq(&rq->lock); | 7734 | raw_spin_unlock_irq(&rq->lock); |
7729 | complete(&req->done); | 7735 | complete(&req->done); |
7730 | spin_lock_irq(&rq->lock); | 7736 | raw_spin_lock_irq(&rq->lock); |
7731 | } | 7737 | } |
7732 | spin_unlock_irq(&rq->lock); | 7738 | raw_spin_unlock_irq(&rq->lock); |
7733 | break; | 7739 | break; |
7734 | 7740 | ||
7735 | case CPU_DYING: | 7741 | case CPU_DYING: |
7736 | case CPU_DYING_FROZEN: | 7742 | case CPU_DYING_FROZEN: |
7737 | /* Update our root-domain */ | 7743 | /* Update our root-domain */ |
7738 | rq = cpu_rq(cpu); | 7744 | rq = cpu_rq(cpu); |
7739 | spin_lock_irqsave(&rq->lock, flags); | 7745 | raw_spin_lock_irqsave(&rq->lock, flags); |
7740 | if (rq->rd) { | 7746 | if (rq->rd) { |
7741 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | 7747 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
7742 | set_rq_offline(rq); | 7748 | set_rq_offline(rq); |
7743 | } | 7749 | } |
7744 | spin_unlock_irqrestore(&rq->lock, flags); | 7750 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7745 | break; | 7751 | break; |
7746 | #endif | 7752 | #endif |
7747 | } | 7753 | } |
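[Note: the migration-queue drain above uses a common idiom: complete() may wake another task, so rq->lock is dropped around it and the queue is re-checked once the lock is reacquired. Stripped-down shape of the loop, as a fragment assuming the surrounding migration_call() context:

	raw_spin_lock_irq(&rq->lock);
	while (!list_empty(&rq->migration_queue)) {
		struct migration_req *req;

		req = list_first_entry(&rq->migration_queue,
				       struct migration_req, list);
		list_del_init(&req->list);
		raw_spin_unlock_irq(&rq->lock);	/* never wake tasks under rq->lock */
		complete(&req->done);
		raw_spin_lock_irq(&rq->lock);	/* queue may have grown meanwhile */
	}
	raw_spin_unlock_irq(&rq->lock);
]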
@@ -7971,7 +7977,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
7971 | struct root_domain *old_rd = NULL; | 7977 | struct root_domain *old_rd = NULL; |
7972 | unsigned long flags; | 7978 | unsigned long flags; |
7973 | 7979 | ||
7974 | spin_lock_irqsave(&rq->lock, flags); | 7980 | raw_spin_lock_irqsave(&rq->lock, flags); |
7975 | 7981 | ||
7976 | if (rq->rd) { | 7982 | if (rq->rd) { |
7977 | old_rd = rq->rd; | 7983 | old_rd = rq->rd; |
@@ -7997,7 +8003,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
7997 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) | 8003 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) |
7998 | set_rq_online(rq); | 8004 | set_rq_online(rq); |
7999 | 8005 | ||
8000 | spin_unlock_irqrestore(&rq->lock, flags); | 8006 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
8001 | 8007 | ||
8002 | if (old_rd) | 8008 | if (old_rd) |
8003 | free_rootdomain(old_rd); | 8009 | free_rootdomain(old_rd); |
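[Note: observe the ordering in rq_attach_root(): the old root domain is only detached under rq->lock and freed after the unlock, since free_rootdomain() does work that should not run inside the raw critical section. Generic shape of the detach-then-free idiom, using names from the surrounding code:

	raw_spin_lock_irqsave(&rq->lock, flags);
	old_rd = rq->rd;	/* detach under the lock ... */
	rq->rd = rd;
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		free_rootdomain(old_rd);	/* ... free outside it */
]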
@@ -8283,14 +8289,14 @@ enum s_alloc { | |||
8283 | */ | 8289 | */ |
8284 | #ifdef CONFIG_SCHED_SMT | 8290 | #ifdef CONFIG_SCHED_SMT |
8285 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); | 8291 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
8286 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); | 8292 | static DEFINE_PER_CPU(struct static_sched_group, sched_groups); |
8287 | 8293 | ||
8288 | static int | 8294 | static int |
8289 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, | 8295 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
8290 | struct sched_group **sg, struct cpumask *unused) | 8296 | struct sched_group **sg, struct cpumask *unused) |
8291 | { | 8297 | { |
8292 | if (sg) | 8298 | if (sg) |
8293 | *sg = &per_cpu(sched_group_cpus, cpu).sg; | 8299 | *sg = &per_cpu(sched_groups, cpu).sg; |
8294 | return cpu; | 8300 | return cpu; |
8295 | } | 8301 | } |
8296 | #endif /* CONFIG_SCHED_SMT */ | 8302 | #endif /* CONFIG_SCHED_SMT */ |
@@ -9100,7 +9106,7 @@ match1: | |||
9100 | if (doms_new == NULL) { | 9106 | if (doms_new == NULL) { |
9101 | ndoms_cur = 0; | 9107 | ndoms_cur = 0; |
9102 | doms_new = &fallback_doms; | 9108 | doms_new = &fallback_doms; |
9103 | cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map); | 9109 | cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); |
9104 | WARN_ON_ONCE(dattr_new); | 9110 | WARN_ON_ONCE(dattr_new); |
9105 | } | 9111 | } |
9106 | 9112 | ||
@@ -9231,8 +9237,10 @@ static int update_sched_domains(struct notifier_block *nfb, | |||
9231 | switch (action) { | 9237 | switch (action) { |
9232 | case CPU_ONLINE: | 9238 | case CPU_ONLINE: |
9233 | case CPU_ONLINE_FROZEN: | 9239 | case CPU_ONLINE_FROZEN: |
9234 | case CPU_DEAD: | 9240 | case CPU_DOWN_PREPARE: |
9235 | case CPU_DEAD_FROZEN: | 9241 | case CPU_DOWN_PREPARE_FROZEN: |
9242 | case CPU_DOWN_FAILED: | ||
9243 | case CPU_DOWN_FAILED_FROZEN: | ||
9236 | partition_sched_domains(1, NULL, NULL); | 9244 | partition_sched_domains(1, NULL, NULL); |
9237 | return NOTIFY_OK; | 9245 | return NOTIFY_OK; |
9238 | 9246 | ||
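[Note: switching update_sched_domains() from CPU_DEAD to CPU_DOWN_PREPARE (with CPU_DOWN_FAILED to undo) pairs with the cpu_active_mask changes elsewhere in the patch: domains are rebuilt before the CPU actually goes away, so the load balancer stops targeting it for the whole offline transition, and a failed offline restores it. A condensed sketch of the resulting notifier, with the _FROZEN variants omitted for brevity:

	static int update_domains(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
	{
		switch (action) {
		case CPU_ONLINE:
		case CPU_DOWN_PREPARE:	/* rebuild before the CPU disappears */
		case CPU_DOWN_FAILED:	/* and again if the offline is aborted */
			partition_sched_domains(1, NULL, NULL);
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}
]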
@@ -9279,7 +9287,7 @@ void __init sched_init_smp(void) | |||
9279 | #endif | 9287 | #endif |
9280 | get_online_cpus(); | 9288 | get_online_cpus(); |
9281 | mutex_lock(&sched_domains_mutex); | 9289 | mutex_lock(&sched_domains_mutex); |
9282 | arch_init_sched_domains(cpu_online_mask); | 9290 | arch_init_sched_domains(cpu_active_mask); |
9283 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); | 9291 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
9284 | if (cpumask_empty(non_isolated_cpus)) | 9292 | if (cpumask_empty(non_isolated_cpus)) |
9285 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); | 9293 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
@@ -9352,13 +9360,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) | |||
9352 | #ifdef CONFIG_SMP | 9360 | #ifdef CONFIG_SMP |
9353 | rt_rq->rt_nr_migratory = 0; | 9361 | rt_rq->rt_nr_migratory = 0; |
9354 | rt_rq->overloaded = 0; | 9362 | rt_rq->overloaded = 0; |
9355 | plist_head_init(&rt_rq->pushable_tasks, &rq->lock); | 9363 | plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock); |
9356 | #endif | 9364 | #endif |
9357 | 9365 | ||
9358 | rt_rq->rt_time = 0; | 9366 | rt_rq->rt_time = 0; |
9359 | rt_rq->rt_throttled = 0; | 9367 | rt_rq->rt_throttled = 0; |
9360 | rt_rq->rt_runtime = 0; | 9368 | rt_rq->rt_runtime = 0; |
9361 | spin_lock_init(&rt_rq->rt_runtime_lock); | 9369 | raw_spin_lock_init(&rt_rq->rt_runtime_lock); |
9362 | 9370 | ||
9363 | #ifdef CONFIG_RT_GROUP_SCHED | 9371 | #ifdef CONFIG_RT_GROUP_SCHED |
9364 | rt_rq->rt_nr_boosted = 0; | 9372 | rt_rq->rt_nr_boosted = 0; |
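[Note: plist_head_init_raw() exists for exactly this conversion: a plist head records the lock that protects it (for lockdep checking), and with rq->lock now a raw_spinlock_t a raw-lock-typed initializer is required. Minimal sketch with hypothetical names:

	static DEFINE_RAW_SPINLOCK(demo_lock);	/* hypothetical lock */
	static struct plist_head demo_head;	/* hypothetical priority list */

	static void demo_init(void)
	{
		/* ties demo_head to demo_lock so lockdep can verify holders */
		plist_head_init_raw(&demo_head, &demo_lock);
	}
]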
@@ -9518,7 +9526,7 @@ void __init sched_init(void) | |||
9518 | struct rq *rq; | 9526 | struct rq *rq; |
9519 | 9527 | ||
9520 | rq = cpu_rq(i); | 9528 | rq = cpu_rq(i); |
9521 | spin_lock_init(&rq->lock); | 9529 | raw_spin_lock_init(&rq->lock); |
9522 | rq->nr_running = 0; | 9530 | rq->nr_running = 0; |
9523 | rq->calc_load_active = 0; | 9531 | rq->calc_load_active = 0; |
9524 | rq->calc_load_update = jiffies + LOAD_FREQ; | 9532 | rq->calc_load_update = jiffies + LOAD_FREQ; |
@@ -9578,7 +9586,7 @@ void __init sched_init(void) | |||
9578 | #elif defined CONFIG_USER_SCHED | 9586 | #elif defined CONFIG_USER_SCHED |
9579 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); | 9587 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); |
9580 | init_tg_rt_entry(&init_task_group, | 9588 | init_tg_rt_entry(&init_task_group, |
9581 | &per_cpu(init_rt_rq, i), | 9589 | &per_cpu(init_rt_rq_var, i), |
9582 | &per_cpu(init_sched_rt_entity, i), i, 1, | 9590 | &per_cpu(init_sched_rt_entity, i), i, 1, |
9583 | root_task_group.rt_se[i]); | 9591 | root_task_group.rt_se[i]); |
9584 | #endif | 9592 | #endif |
@@ -9616,7 +9624,7 @@ void __init sched_init(void) | |||
9616 | #endif | 9624 | #endif |
9617 | 9625 | ||
9618 | #ifdef CONFIG_RT_MUTEXES | 9626 | #ifdef CONFIG_RT_MUTEXES |
9619 | plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); | 9627 | plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); |
9620 | #endif | 9628 | #endif |
9621 | 9629 | ||
9622 | /* | 9630 | /* |
@@ -9741,13 +9749,13 @@ void normalize_rt_tasks(void) | |||
9741 | continue; | 9749 | continue; |
9742 | } | 9750 | } |
9743 | 9751 | ||
9744 | spin_lock(&p->pi_lock); | 9752 | raw_spin_lock(&p->pi_lock); |
9745 | rq = __task_rq_lock(p); | 9753 | rq = __task_rq_lock(p); |
9746 | 9754 | ||
9747 | normalize_task(rq, p); | 9755 | normalize_task(rq, p); |
9748 | 9756 | ||
9749 | __task_rq_unlock(rq); | 9757 | __task_rq_unlock(rq); |
9750 | spin_unlock(&p->pi_lock); | 9758 | raw_spin_unlock(&p->pi_lock); |
9751 | } while_each_thread(g, p); | 9759 | } while_each_thread(g, p); |
9752 | 9760 | ||
9753 | read_unlock_irqrestore(&tasklist_lock, flags); | 9761 | read_unlock_irqrestore(&tasklist_lock, flags); |
@@ -9843,13 +9851,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
9843 | se = kzalloc_node(sizeof(struct sched_entity), | 9851 | se = kzalloc_node(sizeof(struct sched_entity), |
9844 | GFP_KERNEL, cpu_to_node(i)); | 9852 | GFP_KERNEL, cpu_to_node(i)); |
9845 | if (!se) | 9853 | if (!se) |
9846 | goto err; | 9854 | goto err_free_rq; |
9847 | 9855 | ||
9848 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); | 9856 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); |
9849 | } | 9857 | } |
9850 | 9858 | ||
9851 | return 1; | 9859 | return 1; |
9852 | 9860 | ||
9861 | err_free_rq: | ||
9862 | kfree(cfs_rq); | ||
9853 | err: | 9863 | err: |
9854 | return 0; | 9864 | return 0; |
9855 | } | 9865 | } |
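[Note: the new err_free_rq label plugs a small leak: if the sched_entity allocation fails, the cfs_rq allocated immediately before it in the same loop iteration was previously never freed. The rt counterpart below receives the identical treatment. Shape of the fixed error path:

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;	/* nothing from this iteration to undo */

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;	/* cfs_rq would otherwise leak */

		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
	}
	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
]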
@@ -9931,13 +9941,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
9931 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), | 9941 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
9932 | GFP_KERNEL, cpu_to_node(i)); | 9942 | GFP_KERNEL, cpu_to_node(i)); |
9933 | if (!rt_se) | 9943 | if (!rt_se) |
9934 | goto err; | 9944 | goto err_free_rq; |
9935 | 9945 | ||
9936 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); | 9946 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); |
9937 | } | 9947 | } |
9938 | 9948 | ||
9939 | return 1; | 9949 | return 1; |
9940 | 9950 | ||
9951 | err_free_rq: | ||
9952 | kfree(rt_rq); | ||
9941 | err: | 9953 | err: |
9942 | return 0; | 9954 | return 0; |
9943 | } | 9955 | } |
@@ -10106,9 +10118,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares) | |||
10106 | struct rq *rq = cfs_rq->rq; | 10118 | struct rq *rq = cfs_rq->rq; |
10107 | unsigned long flags; | 10119 | unsigned long flags; |
10108 | 10120 | ||
10109 | spin_lock_irqsave(&rq->lock, flags); | 10121 | raw_spin_lock_irqsave(&rq->lock, flags); |
10110 | __set_se_shares(se, shares); | 10122 | __set_se_shares(se, shares); |
10111 | spin_unlock_irqrestore(&rq->lock, flags); | 10123 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
10112 | } | 10124 | } |
10113 | 10125 | ||
10114 | static DEFINE_MUTEX(shares_mutex); | 10126 | static DEFINE_MUTEX(shares_mutex); |
@@ -10293,18 +10305,18 @@ static int tg_set_bandwidth(struct task_group *tg, | |||
10293 | if (err) | 10305 | if (err) |
10294 | goto unlock; | 10306 | goto unlock; |
10295 | 10307 | ||
10296 | spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); | 10308 | raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
10297 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); | 10309 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
10298 | tg->rt_bandwidth.rt_runtime = rt_runtime; | 10310 | tg->rt_bandwidth.rt_runtime = rt_runtime; |
10299 | 10311 | ||
10300 | for_each_possible_cpu(i) { | 10312 | for_each_possible_cpu(i) { |
10301 | struct rt_rq *rt_rq = tg->rt_rq[i]; | 10313 | struct rt_rq *rt_rq = tg->rt_rq[i]; |
10302 | 10314 | ||
10303 | spin_lock(&rt_rq->rt_runtime_lock); | 10315 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
10304 | rt_rq->rt_runtime = rt_runtime; | 10316 | rt_rq->rt_runtime = rt_runtime; |
10305 | spin_unlock(&rt_rq->rt_runtime_lock); | 10317 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
10306 | } | 10318 | } |
10307 | spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); | 10319 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
10308 | unlock: | 10320 | unlock: |
10309 | read_unlock(&tasklist_lock); | 10321 | read_unlock(&tasklist_lock); |
10310 | mutex_unlock(&rt_constraints_mutex); | 10322 | mutex_unlock(&rt_constraints_mutex); |
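[Note: the locking in tg_set_bandwidth() shows the intended nesting: the group-wide rt_runtime_lock is taken with interrupts disabled, and each per-cpu rt_rq lock nests inside it, which is why the inner raw_spin_lock() needs no _irq variant of its own. Condensed:

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);	/* outer, irqs off */
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);	/* inner: irqs already off */
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
]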
@@ -10409,15 +10421,15 @@ static int sched_rt_global_constraints(void) | |||
10409 | if (sysctl_sched_rt_runtime == 0) | 10421 | if (sysctl_sched_rt_runtime == 0) |
10410 | return -EBUSY; | 10422 | return -EBUSY; |
10411 | 10423 | ||
10412 | spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); | 10424 | raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
10413 | for_each_possible_cpu(i) { | 10425 | for_each_possible_cpu(i) { |
10414 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; | 10426 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
10415 | 10427 | ||
10416 | spin_lock(&rt_rq->rt_runtime_lock); | 10428 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
10417 | rt_rq->rt_runtime = global_rt_runtime(); | 10429 | rt_rq->rt_runtime = global_rt_runtime(); |
10418 | spin_unlock(&rt_rq->rt_runtime_lock); | 10430 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
10419 | } | 10431 | } |
10420 | spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); | 10432 | raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); |
10421 | 10433 | ||
10422 | return 0; | 10434 | return 0; |
10423 | } | 10435 | } |
@@ -10708,9 +10720,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | |||
10708 | /* | 10720 | /* |
10709 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. | 10721 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. |
10710 | */ | 10722 | */ |
10711 | spin_lock_irq(&cpu_rq(cpu)->lock); | 10723 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
10712 | data = *cpuusage; | 10724 | data = *cpuusage; |
10713 | spin_unlock_irq(&cpu_rq(cpu)->lock); | 10725 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
10714 | #else | 10726 | #else |
10715 | data = *cpuusage; | 10727 | data = *cpuusage; |
10716 | #endif | 10728 | #endif |
@@ -10726,9 +10738,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) | |||
10726 | /* | 10738 | /* |
10727 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. | 10739 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. |
10728 | */ | 10740 | */ |
10729 | spin_lock_irq(&cpu_rq(cpu)->lock); | 10741 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
10730 | *cpuusage = val; | 10742 | *cpuusage = val; |
10731 | spin_unlock_irq(&cpu_rq(cpu)->lock); | 10743 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
10732 | #else | 10744 | #else |
10733 | *cpuusage = val; | 10745 | *cpuusage = val; |
10734 | #endif | 10746 | #endif |
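[Note: the rq->lock taken in these two cpuacct accessors guards against torn accesses: on 32-bit platforms a u64 load or store takes two machine words, so an unlocked reader could observe half of an in-flight update. A sketch of the guarded read; the helper name is illustrative, and the CONFIG_64BIT branch simply dereferences:

	/* Illustrative helper: make a 64-bit read atomic w.r.t. writers
	 * that also hold rq->lock, since 32-bit CPUs split u64 accesses.
	 */
	static u64 cpuusage_read_locked(int cpu, u64 *cpuusage)
	{
		u64 data;

		raw_spin_lock_irq(&cpu_rq(cpu)->lock);
		data = *cpuusage;
		raw_spin_unlock_irq(&cpu_rq(cpu)->lock);

		return data;
	}
]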
@@ -10962,9 +10974,9 @@ void synchronize_sched_expedited(void) | |||
10962 | init_completion(&req->done); | 10974 | init_completion(&req->done); |
10963 | req->task = NULL; | 10975 | req->task = NULL; |
10964 | req->dest_cpu = RCU_MIGRATION_NEED_QS; | 10976 | req->dest_cpu = RCU_MIGRATION_NEED_QS; |
10965 | spin_lock_irqsave(&rq->lock, flags); | 10977 | raw_spin_lock_irqsave(&rq->lock, flags); |
10966 | list_add(&req->list, &rq->migration_queue); | 10978 | list_add(&req->list, &rq->migration_queue); |
10967 | spin_unlock_irqrestore(&rq->lock, flags); | 10979 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
10968 | wake_up_process(rq->migration_thread); | 10980 | wake_up_process(rq->migration_thread); |
10969 | } | 10981 | } |
10970 | for_each_online_cpu(cpu) { | 10982 | for_each_online_cpu(cpu) { |
@@ -10972,11 +10984,11 @@ void synchronize_sched_expedited(void) | |||
10972 | req = &per_cpu(rcu_migration_req, cpu); | 10984 | req = &per_cpu(rcu_migration_req, cpu); |
10973 | rq = cpu_rq(cpu); | 10985 | rq = cpu_rq(cpu); |
10974 | wait_for_completion(&req->done); | 10986 | wait_for_completion(&req->done); |
10975 | spin_lock_irqsave(&rq->lock, flags); | 10987 | raw_spin_lock_irqsave(&rq->lock, flags); |
10976 | if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) | 10988 | if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) |
10977 | need_full_sync = 1; | 10989 | need_full_sync = 1; |
10978 | req->dest_cpu = RCU_MIGRATION_IDLE; | 10990 | req->dest_cpu = RCU_MIGRATION_IDLE; |
10979 | spin_unlock_irqrestore(&rq->lock, flags); | 10991 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
10980 | } | 10992 | } |
10981 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; | 10993 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; |
10982 | synchronize_sched_expedited_count++; | 10994 | synchronize_sched_expedited_count++; |
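[Note: the synchronize_sched_expedited() flow above has two phases: post a dummy migration request to every online CPU and kick its migration thread, then wait for each completion; forcing every migration thread to run guarantees each CPU has passed through a context switch, i.e. a quiescent state. Condensed two-phase shape, as a fragment of the function above:

	for_each_online_cpu(cpu) {	/* phase 1: post request, kick thread */
		req = &per_cpu(rcu_migration_req, cpu);
		rq = cpu_rq(cpu);
		raw_spin_lock_irqsave(&rq->lock, flags);
		list_add(&req->list, &rq->migration_queue);
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		wake_up_process(rq->migration_thread);
	}

	for_each_online_cpu(cpu)	/* phase 2: wait for every CPU */
		wait_for_completion(&per_cpu(rcu_migration_req, cpu).done);
]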