Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 742 |
1 file changed, 388 insertions(+), 354 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e7f2cfa6a257..c535cc4f6428 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array { | |||
141 | 141 | ||
142 | struct rt_bandwidth { | 142 | struct rt_bandwidth { |
143 | /* nests inside the rq lock: */ | 143 | /* nests inside the rq lock: */ |
144 | spinlock_t rt_runtime_lock; | 144 | raw_spinlock_t rt_runtime_lock; |
145 | ktime_t rt_period; | 145 | ktime_t rt_period; |
146 | u64 rt_runtime; | 146 | u64 rt_runtime; |
147 | struct hrtimer rt_period_timer; | 147 | struct hrtimer rt_period_timer; |
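The theme running through this patch is the conversion of the scheduler's core locks (rt_runtime_lock here, and rq->lock and p->pi_lock further down) from spinlock_t to raw_spinlock_t, so they remain true spinning locks even on preempt-rt kernels where ordinary spinlocks become sleeping locks. A minimal sketch of the raw_* API, which mirrors the plain spinlock API one for one (illustrative, not taken from this patch):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_critical_section(void)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&example_lock, flags);
        /* code here must never sleep, even on PREEMPT_RT */
        raw_spin_unlock_irqrestore(&example_lock, flags);
    }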
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
178 | rt_b->rt_period = ns_to_ktime(period); | 178 | rt_b->rt_period = ns_to_ktime(period); |
179 | rt_b->rt_runtime = runtime; | 179 | rt_b->rt_runtime = runtime; |
180 | 180 | ||
181 | spin_lock_init(&rt_b->rt_runtime_lock); | 181 | raw_spin_lock_init(&rt_b->rt_runtime_lock); |
182 | 182 | ||
183 | hrtimer_init(&rt_b->rt_period_timer, | 183 | hrtimer_init(&rt_b->rt_period_timer, |
184 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 184 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | |||
200 | if (hrtimer_active(&rt_b->rt_period_timer)) | 200 | if (hrtimer_active(&rt_b->rt_period_timer)) |
201 | return; | 201 | return; |
202 | 202 | ||
203 | spin_lock(&rt_b->rt_runtime_lock); | 203 | raw_spin_lock(&rt_b->rt_runtime_lock); |
204 | for (;;) { | 204 | for (;;) { |
205 | unsigned long delta; | 205 | unsigned long delta; |
206 | ktime_t soft, hard; | 206 | ktime_t soft, hard; |
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | |||
217 | __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, | 217 | __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, |
218 | HRTIMER_MODE_ABS_PINNED, 0); | 218 | HRTIMER_MODE_ABS_PINNED, 0); |
219 | } | 219 | } |
220 | spin_unlock(&rt_b->rt_runtime_lock); | 220 | raw_spin_unlock(&rt_b->rt_runtime_lock); |
221 | } | 221 | } |
222 | 222 | ||
223 | #ifdef CONFIG_RT_GROUP_SCHED | 223 | #ifdef CONFIG_RT_GROUP_SCHED |
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq); | |||
298 | 298 | ||
299 | #ifdef CONFIG_RT_GROUP_SCHED | 299 | #ifdef CONFIG_RT_GROUP_SCHED |
300 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); | 300 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); |
301 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq); | 301 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var); |
302 | #endif /* CONFIG_RT_GROUP_SCHED */ | 302 | #endif /* CONFIG_RT_GROUP_SCHED */ |
303 | #else /* !CONFIG_USER_SCHED */ | 303 | #else /* !CONFIG_USER_SCHED */ |
304 | #define root_task_group init_task_group | 304 | #define root_task_group init_task_group |
@@ -470,7 +470,7 @@ struct rt_rq { | |||
470 | u64 rt_time; | 470 | u64 rt_time; |
471 | u64 rt_runtime; | 471 | u64 rt_runtime; |
472 | /* Nests inside the rq lock: */ | 472 | /* Nests inside the rq lock: */ |
473 | spinlock_t rt_runtime_lock; | 473 | raw_spinlock_t rt_runtime_lock; |
474 | 474 | ||
475 | #ifdef CONFIG_RT_GROUP_SCHED | 475 | #ifdef CONFIG_RT_GROUP_SCHED |
476 | unsigned long rt_nr_boosted; | 476 | unsigned long rt_nr_boosted; |
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain; | |||
525 | */ | 525 | */ |
526 | struct rq { | 526 | struct rq { |
527 | /* runqueue lock: */ | 527 | /* runqueue lock: */ |
528 | spinlock_t lock; | 528 | raw_spinlock_t lock; |
529 | 529 | ||
530 | /* | 530 | /* |
531 | * nr_running and cpu_load should be in the same cacheline because | 531 | * nr_running and cpu_load should be in the same cacheline because |
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq) | |||
685 | */ | 685 | */ |
686 | int runqueue_is_locked(int cpu) | 686 | int runqueue_is_locked(int cpu) |
687 | { | 687 | { |
688 | return spin_is_locked(&cpu_rq(cpu)->lock); | 688 | return raw_spin_is_locked(&cpu_rq(cpu)->lock); |
689 | } | 689 | } |
690 | 690 | ||
691 | /* | 691 | /* |
@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; | |||
814 | * default: 0.25ms | 814 | * default: 0.25ms |
815 | */ | 815 | */ |
816 | unsigned int sysctl_sched_shares_ratelimit = 250000; | 816 | unsigned int sysctl_sched_shares_ratelimit = 250000; |
817 | unsigned int normalized_sysctl_sched_shares_ratelimit = 250000; | ||
817 | 818 | ||
818 | /* | 819 | /* |
819 | * Inject some fuzzyness into changing the per-cpu group shares | 820 | * Inject some fuzzyness into changing the per-cpu group shares |
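A normalized_ shadow of sysctl_sched_shares_ratelimit appears here; together with update_sysctl() and get_update_sysctl_factor(), declared a few hunks below and defined near the end of this diff, the normalized copy presumably serves as the fixed baseline from which the live tunable is recomputed whenever the scaling factor changes, rather than rescaling an already scaled value. A sketch of that use, under the assumption above (the helper name is made up):

    /* Illustrative only: recompute the live tunable from its baseline. */
    static void update_shares_ratelimit_sketch(void)
    {
        unsigned int factor = get_update_sysctl_factor();

        sysctl_sched_shares_ratelimit =
            normalized_sysctl_sched_shares_ratelimit * factor;
    }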
@@ -892,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
892 | */ | 893 | */ |
893 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); | 894 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); |
894 | 895 | ||
895 | spin_unlock_irq(&rq->lock); | 896 | raw_spin_unlock_irq(&rq->lock); |
896 | } | 897 | } |
897 | 898 | ||
898 | #else /* __ARCH_WANT_UNLOCKED_CTXSW */ | 899 | #else /* __ARCH_WANT_UNLOCKED_CTXSW */ |
@@ -916,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) | |||
916 | next->oncpu = 1; | 917 | next->oncpu = 1; |
917 | #endif | 918 | #endif |
918 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | 919 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
919 | spin_unlock_irq(&rq->lock); | 920 | raw_spin_unlock_irq(&rq->lock); |
920 | #else | 921 | #else |
921 | spin_unlock(&rq->lock); | 922 | raw_spin_unlock(&rq->lock); |
922 | #endif | 923 | #endif |
923 | } | 924 | } |
924 | 925 | ||
@@ -948,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p) | |||
948 | { | 949 | { |
949 | for (;;) { | 950 | for (;;) { |
950 | struct rq *rq = task_rq(p); | 951 | struct rq *rq = task_rq(p); |
951 | spin_lock(&rq->lock); | 952 | raw_spin_lock(&rq->lock); |
952 | if (likely(rq == task_rq(p))) | 953 | if (likely(rq == task_rq(p))) |
953 | return rq; | 954 | return rq; |
954 | spin_unlock(&rq->lock); | 955 | raw_spin_unlock(&rq->lock); |
955 | } | 956 | } |
956 | } | 957 | } |
957 | 958 | ||
@@ -968,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) | |||
968 | for (;;) { | 969 | for (;;) { |
969 | local_irq_save(*flags); | 970 | local_irq_save(*flags); |
970 | rq = task_rq(p); | 971 | rq = task_rq(p); |
971 | spin_lock(&rq->lock); | 972 | raw_spin_lock(&rq->lock); |
972 | if (likely(rq == task_rq(p))) | 973 | if (likely(rq == task_rq(p))) |
973 | return rq; | 974 | return rq; |
974 | spin_unlock_irqrestore(&rq->lock, *flags); | 975 | raw_spin_unlock_irqrestore(&rq->lock, *flags); |
975 | } | 976 | } |
976 | } | 977 | } |
977 | 978 | ||
@@ -980,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p) | |||
980 | struct rq *rq = task_rq(p); | 981 | struct rq *rq = task_rq(p); |
981 | 982 | ||
982 | smp_mb(); /* spin-unlock-wait is not a full memory barrier */ | 983 | smp_mb(); /* spin-unlock-wait is not a full memory barrier */ |
983 | spin_unlock_wait(&rq->lock); | 984 | raw_spin_unlock_wait(&rq->lock); |
984 | } | 985 | } |
985 | 986 | ||
986 | static void __task_rq_unlock(struct rq *rq) | 987 | static void __task_rq_unlock(struct rq *rq) |
987 | __releases(rq->lock) | 988 | __releases(rq->lock) |
988 | { | 989 | { |
989 | spin_unlock(&rq->lock); | 990 | raw_spin_unlock(&rq->lock); |
990 | } | 991 | } |
991 | 992 | ||
992 | static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) | 993 | static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) |
993 | __releases(rq->lock) | 994 | __releases(rq->lock) |
994 | { | 995 | { |
995 | spin_unlock_irqrestore(&rq->lock, *flags); | 996 | raw_spin_unlock_irqrestore(&rq->lock, *flags); |
996 | } | 997 | } |
997 | 998 | ||
998 | /* | 999 | /* |
@@ -1005,7 +1006,7 @@ static struct rq *this_rq_lock(void) | |||
1005 | 1006 | ||
1006 | local_irq_disable(); | 1007 | local_irq_disable(); |
1007 | rq = this_rq(); | 1008 | rq = this_rq(); |
1008 | spin_lock(&rq->lock); | 1009 | raw_spin_lock(&rq->lock); |
1009 | 1010 | ||
1010 | return rq; | 1011 | return rq; |
1011 | } | 1012 | } |
@@ -1052,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer) | |||
1052 | 1053 | ||
1053 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); | 1054 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); |
1054 | 1055 | ||
1055 | spin_lock(&rq->lock); | 1056 | raw_spin_lock(&rq->lock); |
1056 | update_rq_clock(rq); | 1057 | update_rq_clock(rq); |
1057 | rq->curr->sched_class->task_tick(rq, rq->curr, 1); | 1058 | rq->curr->sched_class->task_tick(rq, rq->curr, 1); |
1058 | spin_unlock(&rq->lock); | 1059 | raw_spin_unlock(&rq->lock); |
1059 | 1060 | ||
1060 | return HRTIMER_NORESTART; | 1061 | return HRTIMER_NORESTART; |
1061 | } | 1062 | } |
@@ -1068,10 +1069,10 @@ static void __hrtick_start(void *arg) | |||
1068 | { | 1069 | { |
1069 | struct rq *rq = arg; | 1070 | struct rq *rq = arg; |
1070 | 1071 | ||
1071 | spin_lock(&rq->lock); | 1072 | raw_spin_lock(&rq->lock); |
1072 | hrtimer_restart(&rq->hrtick_timer); | 1073 | hrtimer_restart(&rq->hrtick_timer); |
1073 | rq->hrtick_csd_pending = 0; | 1074 | rq->hrtick_csd_pending = 0; |
1074 | spin_unlock(&rq->lock); | 1075 | raw_spin_unlock(&rq->lock); |
1075 | } | 1076 | } |
1076 | 1077 | ||
1077 | /* | 1078 | /* |
@@ -1178,7 +1179,7 @@ static void resched_task(struct task_struct *p) | |||
1178 | { | 1179 | { |
1179 | int cpu; | 1180 | int cpu; |
1180 | 1181 | ||
1181 | assert_spin_locked(&task_rq(p)->lock); | 1182 | assert_raw_spin_locked(&task_rq(p)->lock); |
1182 | 1183 | ||
1183 | if (test_tsk_need_resched(p)) | 1184 | if (test_tsk_need_resched(p)) |
1184 | return; | 1185 | return; |
@@ -1200,10 +1201,10 @@ static void resched_cpu(int cpu) | |||
1200 | struct rq *rq = cpu_rq(cpu); | 1201 | struct rq *rq = cpu_rq(cpu); |
1201 | unsigned long flags; | 1202 | unsigned long flags; |
1202 | 1203 | ||
1203 | if (!spin_trylock_irqsave(&rq->lock, flags)) | 1204 | if (!raw_spin_trylock_irqsave(&rq->lock, flags)) |
1204 | return; | 1205 | return; |
1205 | resched_task(cpu_curr(cpu)); | 1206 | resched_task(cpu_curr(cpu)); |
1206 | spin_unlock_irqrestore(&rq->lock, flags); | 1207 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
1207 | } | 1208 | } |
1208 | 1209 | ||
1209 | #ifdef CONFIG_NO_HZ | 1210 | #ifdef CONFIG_NO_HZ |
@@ -1272,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) | |||
1272 | #else /* !CONFIG_SMP */ | 1273 | #else /* !CONFIG_SMP */ |
1273 | static void resched_task(struct task_struct *p) | 1274 | static void resched_task(struct task_struct *p) |
1274 | { | 1275 | { |
1275 | assert_spin_locked(&task_rq(p)->lock); | 1276 | assert_raw_spin_locked(&task_rq(p)->lock); |
1276 | set_tsk_need_resched(p); | 1277 | set_tsk_need_resched(p); |
1277 | } | 1278 | } |
1278 | 1279 | ||
@@ -1599,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1599 | struct rq *rq = cpu_rq(cpu); | 1600 | struct rq *rq = cpu_rq(cpu); |
1600 | unsigned long flags; | 1601 | unsigned long flags; |
1601 | 1602 | ||
1602 | spin_lock_irqsave(&rq->lock, flags); | 1603 | raw_spin_lock_irqsave(&rq->lock, flags); |
1603 | tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; | 1604 | tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; |
1604 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; | 1605 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; |
1605 | __set_se_shares(tg->se[cpu], shares); | 1606 | __set_se_shares(tg->se[cpu], shares); |
1606 | spin_unlock_irqrestore(&rq->lock, flags); | 1607 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
1607 | } | 1608 | } |
1608 | } | 1609 | } |
1609 | 1610 | ||
@@ -1614,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1614 | */ | 1615 | */ |
1615 | static int tg_shares_up(struct task_group *tg, void *data) | 1616 | static int tg_shares_up(struct task_group *tg, void *data) |
1616 | { | 1617 | { |
1617 | unsigned long weight, rq_weight = 0, shares = 0; | 1618 | unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0; |
1618 | unsigned long *usd_rq_weight; | 1619 | unsigned long *usd_rq_weight; |
1619 | struct sched_domain *sd = data; | 1620 | struct sched_domain *sd = data; |
1620 | unsigned long flags; | 1621 | unsigned long flags; |
@@ -1630,6 +1631,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1630 | weight = tg->cfs_rq[i]->load.weight; | 1631 | weight = tg->cfs_rq[i]->load.weight; |
1631 | usd_rq_weight[i] = weight; | 1632 | usd_rq_weight[i] = weight; |
1632 | 1633 | ||
1634 | rq_weight += weight; | ||
1633 | /* | 1635 | /* |
1634 | * If there are currently no tasks on the cpu pretend there | 1636 | * If there are currently no tasks on the cpu pretend there |
1635 | * is one of average load so that when a new task gets to | 1637 | * is one of average load so that when a new task gets to |
@@ -1638,10 +1640,13 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1638 | if (!weight) | 1640 | if (!weight) |
1639 | weight = NICE_0_LOAD; | 1641 | weight = NICE_0_LOAD; |
1640 | 1642 | ||
1641 | rq_weight += weight; | 1643 | sum_weight += weight; |
1642 | shares += tg->cfs_rq[i]->shares; | 1644 | shares += tg->cfs_rq[i]->shares; |
1643 | } | 1645 | } |
1644 | 1646 | ||
1647 | if (!rq_weight) | ||
1648 | rq_weight = sum_weight; | ||
1649 | |||
1645 | if ((!shares && rq_weight) || shares > tg->shares) | 1650 | if ((!shares && rq_weight) || shares > tg->shares) |
1646 | shares = tg->shares; | 1651 | shares = tg->shares; |
1647 | 1652 | ||
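In tg_shares_up(), rq_weight now accumulates the real per-cpu weights (summed before the idle-cpu fixup), while the padded sum that pretends each idle cpu carries one NICE_0_LOAD task moves into sum_weight; only when every cpu in the domain is idle does rq_weight fall back to the padded value, so the later shares calculation still has something non-zero to distribute against. A condensed sketch of the resulting accounting, assuming it sits inside sched.c where NICE_0_LOAD is defined (the helper itself is not in the patch):

    /* Illustrative helper: real weight sum with padded-sum fallback. */
    static unsigned long effective_rq_weight(const unsigned long *weights, int nr)
    {
        unsigned long rq_weight = 0, sum_weight = 0;
        int i;

        for (i = 0; i < nr; i++) {
            rq_weight  += weights[i];                            /* real load   */
            sum_weight += weights[i] ? weights[i] : NICE_0_LOAD; /* padded load */
        }

        return rq_weight ? rq_weight : sum_weight;
    }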
@@ -1701,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd) | |||
1701 | if (root_task_group_empty()) | 1706 | if (root_task_group_empty()) |
1702 | return; | 1707 | return; |
1703 | 1708 | ||
1704 | spin_unlock(&rq->lock); | 1709 | raw_spin_unlock(&rq->lock); |
1705 | update_shares(sd); | 1710 | update_shares(sd); |
1706 | spin_lock(&rq->lock); | 1711 | raw_spin_lock(&rq->lock); |
1707 | } | 1712 | } |
1708 | 1713 | ||
1709 | static void update_h_load(long cpu) | 1714 | static void update_h_load(long cpu) |
@@ -1743,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
1743 | __acquires(busiest->lock) | 1748 | __acquires(busiest->lock) |
1744 | __acquires(this_rq->lock) | 1749 | __acquires(this_rq->lock) |
1745 | { | 1750 | { |
1746 | spin_unlock(&this_rq->lock); | 1751 | raw_spin_unlock(&this_rq->lock); |
1747 | double_rq_lock(this_rq, busiest); | 1752 | double_rq_lock(this_rq, busiest); |
1748 | 1753 | ||
1749 | return 1; | 1754 | return 1; |
@@ -1764,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
1764 | { | 1769 | { |
1765 | int ret = 0; | 1770 | int ret = 0; |
1766 | 1771 | ||
1767 | if (unlikely(!spin_trylock(&busiest->lock))) { | 1772 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
1768 | if (busiest < this_rq) { | 1773 | if (busiest < this_rq) { |
1769 | spin_unlock(&this_rq->lock); | 1774 | raw_spin_unlock(&this_rq->lock); |
1770 | spin_lock(&busiest->lock); | 1775 | raw_spin_lock(&busiest->lock); |
1771 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | 1776 | raw_spin_lock_nested(&this_rq->lock, |
1777 | SINGLE_DEPTH_NESTING); | ||
1772 | ret = 1; | 1778 | ret = 1; |
1773 | } else | 1779 | } else |
1774 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | 1780 | raw_spin_lock_nested(&busiest->lock, |
1781 | SINGLE_DEPTH_NESTING); | ||
1775 | } | 1782 | } |
1776 | return ret; | 1783 | return ret; |
1777 | } | 1784 | } |
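_double_lock_balance() keeps its lock-ordering rule but on the raw API: when the trylock on the busiest runqueue fails, the locks are taken in address order (lower-addressed rq first) so two cpus balancing against each other cannot deadlock, and the second acquisition uses raw_spin_lock_nested() with SINGLE_DEPTH_NESTING so lockdep accepts two locks of the same class being held at once. The same idiom appears in double_rq_lock() further down; a generic sketch:

    /* Sketch of the address-ordering idiom (compare double_rq_lock()). */
    static void lock_two_rqs(struct rq *a, struct rq *b)
    {
        if (a == b) {
            raw_spin_lock(&a->lock);
        } else if (a < b) {
            raw_spin_lock(&a->lock);
            raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        } else {
            raw_spin_lock(&b->lock);
            raw_spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
        }
    }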
@@ -1785,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
1785 | { | 1792 | { |
1786 | if (unlikely(!irqs_disabled())) { | 1793 | if (unlikely(!irqs_disabled())) { |
1787 | /* printk() doesn't work good under rq->lock */ | 1794 | /* printk() doesn't work good under rq->lock */ |
1788 | spin_unlock(&this_rq->lock); | 1795 | raw_spin_unlock(&this_rq->lock); |
1789 | BUG_ON(1); | 1796 | BUG_ON(1); |
1790 | } | 1797 | } |
1791 | 1798 | ||
@@ -1795,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
1795 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | 1802 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
1796 | __releases(busiest->lock) | 1803 | __releases(busiest->lock) |
1797 | { | 1804 | { |
1798 | spin_unlock(&busiest->lock); | 1805 | raw_spin_unlock(&busiest->lock); |
1799 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | 1806 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
1800 | } | 1807 | } |
1801 | #endif | 1808 | #endif |
@@ -1810,6 +1817,22 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | |||
1810 | #endif | 1817 | #endif |
1811 | 1818 | ||
1812 | static void calc_load_account_active(struct rq *this_rq); | 1819 | static void calc_load_account_active(struct rq *this_rq); |
1820 | static void update_sysctl(void); | ||
1821 | static int get_update_sysctl_factor(void); | ||
1822 | |||
1823 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | ||
1824 | { | ||
1825 | set_task_rq(p, cpu); | ||
1826 | #ifdef CONFIG_SMP | ||
1827 | /* | ||
1828 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be | ||
1829 | * successfuly executed on another CPU. We must ensure that updates of | ||
1830 | * per-task data have been completed by this moment. | ||
1831 | */ | ||
1832 | smp_wmb(); | ||
1833 | task_thread_info(p)->cpu = cpu; | ||
1834 | #endif | ||
1835 | } | ||
1813 | 1836 | ||
1814 | #include "sched_stats.h" | 1837 | #include "sched_stats.h" |
1815 | #include "sched_idletask.c" | 1838 | #include "sched_idletask.c" |
@@ -1967,20 +1990,6 @@ inline int task_curr(const struct task_struct *p) | |||
1967 | return cpu_curr(task_cpu(p)) == p; | 1990 | return cpu_curr(task_cpu(p)) == p; |
1968 | } | 1991 | } |
1969 | 1992 | ||
1970 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | ||
1971 | { | ||
1972 | set_task_rq(p, cpu); | ||
1973 | #ifdef CONFIG_SMP | ||
1974 | /* | ||
1975 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be | ||
1976 | * successfuly executed on another CPU. We must ensure that updates of | ||
1977 | * per-task data have been completed by this moment. | ||
1978 | */ | ||
1979 | smp_wmb(); | ||
1980 | task_thread_info(p)->cpu = cpu; | ||
1981 | #endif | ||
1982 | } | ||
1983 | |||
1984 | static inline void check_class_changed(struct rq *rq, struct task_struct *p, | 1993 | static inline void check_class_changed(struct rq *rq, struct task_struct *p, |
1985 | const struct sched_class *prev_class, | 1994 | const struct sched_class *prev_class, |
1986 | int oldprio, int running) | 1995 | int oldprio, int running) |
@@ -1993,39 +2002,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p, | |||
1993 | p->sched_class->prio_changed(rq, p, oldprio, running); | 2002 | p->sched_class->prio_changed(rq, p, oldprio, running); |
1994 | } | 2003 | } |
1995 | 2004 | ||
1996 | /** | ||
1997 | * kthread_bind - bind a just-created kthread to a cpu. | ||
1998 | * @p: thread created by kthread_create(). | ||
1999 | * @cpu: cpu (might not be online, must be possible) for @k to run on. | ||
2000 | * | ||
2001 | * Description: This function is equivalent to set_cpus_allowed(), | ||
2002 | * except that @cpu doesn't need to be online, and the thread must be | ||
2003 | * stopped (i.e., just returned from kthread_create()). | ||
2004 | * | ||
2005 | * Function lives here instead of kthread.c because it messes with | ||
2006 | * scheduler internals which require locking. | ||
2007 | */ | ||
2008 | void kthread_bind(struct task_struct *p, unsigned int cpu) | ||
2009 | { | ||
2010 | struct rq *rq = cpu_rq(cpu); | ||
2011 | unsigned long flags; | ||
2012 | |||
2013 | /* Must have done schedule() in kthread() before we set_task_cpu */ | ||
2014 | if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) { | ||
2015 | WARN_ON(1); | ||
2016 | return; | ||
2017 | } | ||
2018 | |||
2019 | spin_lock_irqsave(&rq->lock, flags); | ||
2020 | update_rq_clock(rq); | ||
2021 | set_task_cpu(p, cpu); | ||
2022 | p->cpus_allowed = cpumask_of_cpu(cpu); | ||
2023 | p->rt.nr_cpus_allowed = 1; | ||
2024 | p->flags |= PF_THREAD_BOUND; | ||
2025 | spin_unlock_irqrestore(&rq->lock, flags); | ||
2026 | } | ||
2027 | EXPORT_SYMBOL(kthread_bind); | ||
2028 | |||
2029 | #ifdef CONFIG_SMP | 2005 | #ifdef CONFIG_SMP |
2030 | /* | 2006 | /* |
2031 | * Is this task likely cache-hot: | 2007 | * Is this task likely cache-hot: |
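kthread_bind() disappears from sched.c in this hunk. With the set_task_cpu() and placement changes elsewhere in the patch it no longer needs to poke at runqueue internals under rq->lock, so it can live with the rest of the kthread code again (presumably kthread.c, as the removed comment implies). The caller-side pattern is unchanged; a sketch, where worker_fn is a made-up thread function:

    #include <linux/err.h>
    #include <linux/kthread.h>

    static int worker_fn(void *data);   /* made-up worker, defined elsewhere */

    static struct task_struct *start_worker_on(int cpu)
    {
        struct task_struct *t;

        t = kthread_create(worker_fn, NULL, "worker/%d", cpu);
        if (!IS_ERR(t)) {
            /* cpu must be possible; the thread is still stopped here */
            kthread_bind(t, cpu);
            wake_up_process(t);
        }
        return t;
    }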
@@ -2035,6 +2011,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | |||
2035 | { | 2011 | { |
2036 | s64 delta; | 2012 | s64 delta; |
2037 | 2013 | ||
2014 | if (p->sched_class != &fair_sched_class) | ||
2015 | return 0; | ||
2016 | |||
2038 | /* | 2017 | /* |
2039 | * Buddy candidates are cache hot: | 2018 | * Buddy candidates are cache hot: |
2040 | */ | 2019 | */ |
@@ -2043,9 +2022,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | |||
2043 | &p->se == cfs_rq_of(&p->se)->last)) | 2022 | &p->se == cfs_rq_of(&p->se)->last)) |
2044 | return 1; | 2023 | return 1; |
2045 | 2024 | ||
2046 | if (p->sched_class != &fair_sched_class) | ||
2047 | return 0; | ||
2048 | |||
2049 | if (sysctl_sched_migration_cost == -1) | 2025 | if (sysctl_sched_migration_cost == -1) |
2050 | return 1; | 2026 | return 1; |
2051 | if (sysctl_sched_migration_cost == 0) | 2027 | if (sysctl_sched_migration_cost == 0) |
@@ -2056,38 +2032,23 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | |||
2056 | return delta < (s64)sysctl_sched_migration_cost; | 2032 | return delta < (s64)sysctl_sched_migration_cost; |
2057 | } | 2033 | } |
2058 | 2034 | ||
2059 | |||
2060 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | 2035 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
2061 | { | 2036 | { |
2062 | int old_cpu = task_cpu(p); | 2037 | #ifdef CONFIG_SCHED_DEBUG |
2063 | struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu); | 2038 | /* |
2064 | struct cfs_rq *old_cfsrq = task_cfs_rq(p), | 2039 | * We should never call set_task_cpu() on a blocked task, |
2065 | *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); | 2040 | * ttwu() will sort out the placement. |
2066 | u64 clock_offset; | 2041 | */ |
2067 | 2042 | WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && | |
2068 | clock_offset = old_rq->clock - new_rq->clock; | 2043 | !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); |
2044 | #endif | ||
2069 | 2045 | ||
2070 | trace_sched_migrate_task(p, new_cpu); | 2046 | trace_sched_migrate_task(p, new_cpu); |
2071 | 2047 | ||
2072 | #ifdef CONFIG_SCHEDSTATS | 2048 | if (task_cpu(p) != new_cpu) { |
2073 | if (p->se.wait_start) | ||
2074 | p->se.wait_start -= clock_offset; | ||
2075 | if (p->se.sleep_start) | ||
2076 | p->se.sleep_start -= clock_offset; | ||
2077 | if (p->se.block_start) | ||
2078 | p->se.block_start -= clock_offset; | ||
2079 | #endif | ||
2080 | if (old_cpu != new_cpu) { | ||
2081 | p->se.nr_migrations++; | 2049 | p->se.nr_migrations++; |
2082 | #ifdef CONFIG_SCHEDSTATS | 2050 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); |
2083 | if (task_hot(p, old_rq->clock, NULL)) | ||
2084 | schedstat_inc(p, se.nr_forced2_migrations); | ||
2085 | #endif | ||
2086 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, | ||
2087 | 1, 1, NULL, 0); | ||
2088 | } | 2051 | } |
2089 | p->se.vruntime -= old_cfsrq->min_vruntime - | ||
2090 | new_cfsrq->min_vruntime; | ||
2091 | 2052 | ||
2092 | __set_task_cpu(p, new_cpu); | 2053 | __set_task_cpu(p, new_cpu); |
2093 | } | 2054 | } |
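set_task_cpu() is reduced to its essentials: an optional SCHED_DEBUG warning if it is ever called on a blocked task (placement of blocked tasks is now deferred to the wakeup path), the migration tracepoint and counters, and the actual ->cpu update. The clock-offset, wait/sleep/block timestamp and min_vruntime fixups are dropped here and are expected to be handled by the scheduling classes instead. Callers that migrate a runnable task keep following the dequeue/move/enqueue contract, as pull_task() further down shows; a sketch with both rq locks assumed held:

    /* Illustrative only (compare pull_task() below). */
    static void migrate_runnable(struct rq *src_rq, struct rq *dst_rq,
                                 struct task_struct *p, int dst_cpu)
    {
        deactivate_task(src_rq, p, 0);
        set_task_cpu(p, dst_cpu);
        activate_task(dst_rq, p, 0);
    }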
@@ -2112,13 +2073,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) | |||
2112 | 2073 | ||
2113 | /* | 2074 | /* |
2114 | * If the task is not on a runqueue (and not running), then | 2075 | * If the task is not on a runqueue (and not running), then |
2115 | * it is sufficient to simply update the task's cpu field. | 2076 | * the next wake-up will properly place the task. |
2116 | */ | 2077 | */ |
2117 | if (!p->se.on_rq && !task_running(rq, p)) { | 2078 | if (!p->se.on_rq && !task_running(rq, p)) |
2118 | update_rq_clock(rq); | ||
2119 | set_task_cpu(p, dest_cpu); | ||
2120 | return 0; | 2079 | return 0; |
2121 | } | ||
2122 | 2080 | ||
2123 | init_completion(&req->done); | 2081 | init_completion(&req->done); |
2124 | req->task = p; | 2082 | req->task = p; |
@@ -2323,6 +2281,77 @@ void task_oncpu_function_call(struct task_struct *p, | |||
2323 | preempt_enable(); | 2281 | preempt_enable(); |
2324 | } | 2282 | } |
2325 | 2283 | ||
2284 | #ifdef CONFIG_SMP | ||
2285 | static int select_fallback_rq(int cpu, struct task_struct *p) | ||
2286 | { | ||
2287 | int dest_cpu; | ||
2288 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu)); | ||
2289 | |||
2290 | /* Look for allowed, online CPU in same node. */ | ||
2291 | for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) | ||
2292 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) | ||
2293 | return dest_cpu; | ||
2294 | |||
2295 | /* Any allowed, online CPU? */ | ||
2296 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); | ||
2297 | if (dest_cpu < nr_cpu_ids) | ||
2298 | return dest_cpu; | ||
2299 | |||
2300 | /* No more Mr. Nice Guy. */ | ||
2301 | if (dest_cpu >= nr_cpu_ids) { | ||
2302 | rcu_read_lock(); | ||
2303 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); | ||
2304 | rcu_read_unlock(); | ||
2305 | dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); | ||
2306 | |||
2307 | /* | ||
2308 | * Don't tell them about moving exiting tasks or | ||
2309 | * kernel threads (both mm NULL), since they never | ||
2310 | * leave kernel. | ||
2311 | */ | ||
2312 | if (p->mm && printk_ratelimit()) { | ||
2313 | printk(KERN_INFO "process %d (%s) no " | ||
2314 | "longer affine to cpu%d\n", | ||
2315 | task_pid_nr(p), p->comm, cpu); | ||
2316 | } | ||
2317 | } | ||
2318 | |||
2319 | return dest_cpu; | ||
2320 | } | ||
2321 | |||
2322 | /* | ||
2323 | * Called from: | ||
2324 | * | ||
2325 | * - fork, @p is stable because it isn't on the tasklist yet | ||
2326 | * | ||
2327 | * - exec, @p is unstable, retry loop | ||
2328 | * | ||
2329 | * - wake-up, we serialize ->cpus_allowed against TASK_WAKING so | ||
2330 | * we should be good. | ||
2331 | */ | ||
2332 | static inline | ||
2333 | int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) | ||
2334 | { | ||
2335 | int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); | ||
2336 | |||
2337 | /* | ||
2338 | * In order not to call set_task_cpu() on a blocking task we need | ||
2339 | * to rely on ttwu() to place the task on a valid ->cpus_allowed | ||
2340 | * cpu. | ||
2341 | * | ||
2342 | * Since this is common to all placement strategies, this lives here. | ||
2343 | * | ||
2344 | * [ this allows ->select_task() to simply return task_cpu(p) and | ||
2345 | * not worry about this generic constraint ] | ||
2346 | */ | ||
2347 | if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || | ||
2348 | !cpu_online(cpu))) | ||
2349 | cpu = select_fallback_rq(task_cpu(p), p); | ||
2350 | |||
2351 | return cpu; | ||
2352 | } | ||
2353 | #endif | ||
2354 | |||
2326 | /*** | 2355 | /*** |
2327 | * try_to_wake_up - wake up a thread | 2356 | * try_to_wake_up - wake up a thread |
2328 | * @p: the to-be-woken-up thread | 2357 | * @p: the to-be-woken-up thread |
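The new select_fallback_rq() handles the case where the class's choice is unusable: it first looks for an allowed, active cpu on the same node, then any allowed, active cpu, and as a last resort lets the cpuset code reset ->cpus_allowed, printing a rate-limited notice for user tasks. select_task_rq() wraps every ->select_task_rq() call with that sanity check, so, as the comment in the hunk says, a class implementation may simply return task_cpu(p). A sketch of such a minimal implementation:

    /* Illustrative: the wrapper above fixes up a disallowed or offline cpu. */
    static int select_task_rq_trivial(struct task_struct *p,
                                      int sd_flag, int wake_flags)
    {
        return task_cpu(p);
    }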
@@ -2374,17 +2403,18 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2374 | if (task_contributes_to_load(p)) | 2403 | if (task_contributes_to_load(p)) |
2375 | rq->nr_uninterruptible--; | 2404 | rq->nr_uninterruptible--; |
2376 | p->state = TASK_WAKING; | 2405 | p->state = TASK_WAKING; |
2377 | task_rq_unlock(rq, &flags); | ||
2378 | 2406 | ||
2379 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags); | 2407 | if (p->sched_class->task_waking) |
2380 | if (cpu != orig_cpu) { | 2408 | p->sched_class->task_waking(rq, p); |
2381 | local_irq_save(flags); | 2409 | |
2382 | rq = cpu_rq(cpu); | 2410 | __task_rq_unlock(rq); |
2383 | update_rq_clock(rq); | 2411 | |
2412 | cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); | ||
2413 | if (cpu != orig_cpu) | ||
2384 | set_task_cpu(p, cpu); | 2414 | set_task_cpu(p, cpu); |
2385 | local_irq_restore(flags); | 2415 | |
2386 | } | 2416 | rq = __task_rq_lock(p); |
2387 | rq = task_rq_lock(p, &flags); | 2417 | update_rq_clock(rq); |
2388 | 2418 | ||
2389 | WARN_ON(p->state != TASK_WAKING); | 2419 | WARN_ON(p->state != TASK_WAKING); |
2390 | cpu = task_cpu(p); | 2420 | cpu = task_cpu(p); |
@@ -2440,8 +2470,8 @@ out_running: | |||
2440 | 2470 | ||
2441 | p->state = TASK_RUNNING; | 2471 | p->state = TASK_RUNNING; |
2442 | #ifdef CONFIG_SMP | 2472 | #ifdef CONFIG_SMP |
2443 | if (p->sched_class->task_wake_up) | 2473 | if (p->sched_class->task_woken) |
2444 | p->sched_class->task_wake_up(rq, p); | 2474 | p->sched_class->task_woken(rq, p); |
2445 | 2475 | ||
2446 | if (unlikely(rq->idle_stamp)) { | 2476 | if (unlikely(rq->idle_stamp)) { |
2447 | u64 delta = rq->clock - rq->idle_stamp; | 2477 | u64 delta = rq->clock - rq->idle_stamp; |
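try_to_wake_up() no longer drops and retakes the task's runqueue with task_rq_unlock()/task_rq_lock() around cpu selection. It calls the new ->task_waking() hook while still holding the lock, releases only the lock with __task_rq_unlock() (interrupts stay disabled), picks a cpu through select_task_rq(), moves the task with set_task_cpu() if the choice differs, and relocks with __task_rq_lock(); the TASK_WAKING state is what serializes this window against ->cpus_allowed changes. The old ->task_wake_up() callback is renamed ->task_woken(). The hook signatures, as inferred from the call sites in this diff:

    /* Assumed prototypes, matching how the hooks are invoked above. */
    void (*task_waking)(struct rq *rq, struct task_struct *p);
    void (*task_woken)(struct rq *rq, struct task_struct *p);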
@@ -2499,7 +2529,6 @@ static void __sched_fork(struct task_struct *p) | |||
2499 | p->se.avg_overlap = 0; | 2529 | p->se.avg_overlap = 0; |
2500 | p->se.start_runtime = 0; | 2530 | p->se.start_runtime = 0; |
2501 | p->se.avg_wakeup = sysctl_sched_wakeup_granularity; | 2531 | p->se.avg_wakeup = sysctl_sched_wakeup_granularity; |
2502 | p->se.avg_running = 0; | ||
2503 | 2532 | ||
2504 | #ifdef CONFIG_SCHEDSTATS | 2533 | #ifdef CONFIG_SCHEDSTATS |
2505 | p->se.wait_start = 0; | 2534 | p->se.wait_start = 0; |
@@ -2521,7 +2550,6 @@ static void __sched_fork(struct task_struct *p) | |||
2521 | p->se.nr_failed_migrations_running = 0; | 2550 | p->se.nr_failed_migrations_running = 0; |
2522 | p->se.nr_failed_migrations_hot = 0; | 2551 | p->se.nr_failed_migrations_hot = 0; |
2523 | p->se.nr_forced_migrations = 0; | 2552 | p->se.nr_forced_migrations = 0; |
2524 | p->se.nr_forced2_migrations = 0; | ||
2525 | 2553 | ||
2526 | p->se.nr_wakeups = 0; | 2554 | p->se.nr_wakeups = 0; |
2527 | p->se.nr_wakeups_sync = 0; | 2555 | p->se.nr_wakeups_sync = 0; |
@@ -2542,14 +2570,6 @@ static void __sched_fork(struct task_struct *p) | |||
2542 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 2570 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
2543 | INIT_HLIST_HEAD(&p->preempt_notifiers); | 2571 | INIT_HLIST_HEAD(&p->preempt_notifiers); |
2544 | #endif | 2572 | #endif |
2545 | |||
2546 | /* | ||
2547 | * We mark the process as running here, but have not actually | ||
2548 | * inserted it onto the runqueue yet. This guarantees that | ||
2549 | * nobody will actually run it, and a signal or other external | ||
2550 | * event cannot wake it up and insert it on the runqueue either. | ||
2551 | */ | ||
2552 | p->state = TASK_RUNNING; | ||
2553 | } | 2573 | } |
2554 | 2574 | ||
2555 | /* | 2575 | /* |
@@ -2558,9 +2578,14 @@ static void __sched_fork(struct task_struct *p) | |||
2558 | void sched_fork(struct task_struct *p, int clone_flags) | 2578 | void sched_fork(struct task_struct *p, int clone_flags) |
2559 | { | 2579 | { |
2560 | int cpu = get_cpu(); | 2580 | int cpu = get_cpu(); |
2561 | unsigned long flags; | ||
2562 | 2581 | ||
2563 | __sched_fork(p); | 2582 | __sched_fork(p); |
2583 | /* | ||
2584 | * We mark the process as waking here. This guarantees that | ||
2585 | * nobody will actually run it, and a signal or other external | ||
2586 | * event cannot wake it up and insert it on the runqueue either. | ||
2587 | */ | ||
2588 | p->state = TASK_WAKING; | ||
2564 | 2589 | ||
2565 | /* | 2590 | /* |
2566 | * Revert to default priority/policy on fork if requested. | 2591 | * Revert to default priority/policy on fork if requested. |
@@ -2592,13 +2617,13 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2592 | if (!rt_prio(p->prio)) | 2617 | if (!rt_prio(p->prio)) |
2593 | p->sched_class = &fair_sched_class; | 2618 | p->sched_class = &fair_sched_class; |
2594 | 2619 | ||
2620 | if (p->sched_class->task_fork) | ||
2621 | p->sched_class->task_fork(p); | ||
2622 | |||
2595 | #ifdef CONFIG_SMP | 2623 | #ifdef CONFIG_SMP |
2596 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0); | 2624 | cpu = select_task_rq(p, SD_BALANCE_FORK, 0); |
2597 | #endif | 2625 | #endif |
2598 | local_irq_save(flags); | ||
2599 | update_rq_clock(cpu_rq(cpu)); | ||
2600 | set_task_cpu(p, cpu); | 2626 | set_task_cpu(p, cpu); |
2601 | local_irq_restore(flags); | ||
2602 | 2627 | ||
2603 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 2628 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
2604 | if (likely(sched_info_on())) | 2629 | if (likely(sched_info_on())) |
@@ -2629,24 +2654,15 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2629 | struct rq *rq; | 2654 | struct rq *rq; |
2630 | 2655 | ||
2631 | rq = task_rq_lock(p, &flags); | 2656 | rq = task_rq_lock(p, &flags); |
2632 | BUG_ON(p->state != TASK_RUNNING); | 2657 | BUG_ON(p->state != TASK_WAKING); |
2658 | p->state = TASK_RUNNING; | ||
2633 | update_rq_clock(rq); | 2659 | update_rq_clock(rq); |
2634 | 2660 | activate_task(rq, p, 0); | |
2635 | if (!p->sched_class->task_new || !current->se.on_rq) { | ||
2636 | activate_task(rq, p, 0); | ||
2637 | } else { | ||
2638 | /* | ||
2639 | * Let the scheduling class do new task startup | ||
2640 | * management (if any): | ||
2641 | */ | ||
2642 | p->sched_class->task_new(rq, p); | ||
2643 | inc_nr_running(rq); | ||
2644 | } | ||
2645 | trace_sched_wakeup_new(rq, p, 1); | 2661 | trace_sched_wakeup_new(rq, p, 1); |
2646 | check_preempt_curr(rq, p, WF_FORK); | 2662 | check_preempt_curr(rq, p, WF_FORK); |
2647 | #ifdef CONFIG_SMP | 2663 | #ifdef CONFIG_SMP |
2648 | if (p->sched_class->task_wake_up) | 2664 | if (p->sched_class->task_woken) |
2649 | p->sched_class->task_wake_up(rq, p); | 2665 | p->sched_class->task_woken(rq, p); |
2650 | #endif | 2666 | #endif |
2651 | task_rq_unlock(rq, &flags); | 2667 | task_rq_unlock(rq, &flags); |
2652 | } | 2668 | } |
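Fork-time setup changes in step with the wakeup rework: __sched_fork() no longer marks the child TASK_RUNNING; sched_fork() sets TASK_WAKING instead, so a signal or stray wakeup cannot enqueue the half-initialised child, calls the new ->task_fork() class hook (replacing the old ->task_new() startup path), and places the child via select_task_rq() with SD_BALANCE_FORK. wake_up_new_task() then asserts TASK_WAKING, flips the state to TASK_RUNNING and activates the task unconditionally. A condensed sketch of the ordering these hunks establish:

    /* Illustrative summary of the new fork path (not a real function). */
    static void fork_path_sketch(struct task_struct *p)
    {
        __sched_fork(p);
        p->state = TASK_WAKING;                 /* cannot run or be woken yet */

        if (p->sched_class->task_fork)
            p->sched_class->task_fork(p);

        set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));

        /* later: wake_up_new_task() -> TASK_RUNNING + activate_task() */
    }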
@@ -2798,10 +2814,10 @@ static inline void post_schedule(struct rq *rq) | |||
2798 | if (rq->post_schedule) { | 2814 | if (rq->post_schedule) { |
2799 | unsigned long flags; | 2815 | unsigned long flags; |
2800 | 2816 | ||
2801 | spin_lock_irqsave(&rq->lock, flags); | 2817 | raw_spin_lock_irqsave(&rq->lock, flags); |
2802 | if (rq->curr->sched_class->post_schedule) | 2818 | if (rq->curr->sched_class->post_schedule) |
2803 | rq->curr->sched_class->post_schedule(rq); | 2819 | rq->curr->sched_class->post_schedule(rq); |
2804 | spin_unlock_irqrestore(&rq->lock, flags); | 2820 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
2805 | 2821 | ||
2806 | rq->post_schedule = 0; | 2822 | rq->post_schedule = 0; |
2807 | } | 2823 | } |
@@ -3083,15 +3099,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2) | |||
3083 | { | 3099 | { |
3084 | BUG_ON(!irqs_disabled()); | 3100 | BUG_ON(!irqs_disabled()); |
3085 | if (rq1 == rq2) { | 3101 | if (rq1 == rq2) { |
3086 | spin_lock(&rq1->lock); | 3102 | raw_spin_lock(&rq1->lock); |
3087 | __acquire(rq2->lock); /* Fake it out ;) */ | 3103 | __acquire(rq2->lock); /* Fake it out ;) */ |
3088 | } else { | 3104 | } else { |
3089 | if (rq1 < rq2) { | 3105 | if (rq1 < rq2) { |
3090 | spin_lock(&rq1->lock); | 3106 | raw_spin_lock(&rq1->lock); |
3091 | spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); | 3107 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
3092 | } else { | 3108 | } else { |
3093 | spin_lock(&rq2->lock); | 3109 | raw_spin_lock(&rq2->lock); |
3094 | spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); | 3110 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
3095 | } | 3111 | } |
3096 | } | 3112 | } |
3097 | update_rq_clock(rq1); | 3113 | update_rq_clock(rq1); |
@@ -3108,29 +3124,44 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) | |||
3108 | __releases(rq1->lock) | 3124 | __releases(rq1->lock) |
3109 | __releases(rq2->lock) | 3125 | __releases(rq2->lock) |
3110 | { | 3126 | { |
3111 | spin_unlock(&rq1->lock); | 3127 | raw_spin_unlock(&rq1->lock); |
3112 | if (rq1 != rq2) | 3128 | if (rq1 != rq2) |
3113 | spin_unlock(&rq2->lock); | 3129 | raw_spin_unlock(&rq2->lock); |
3114 | else | 3130 | else |
3115 | __release(rq2->lock); | 3131 | __release(rq2->lock); |
3116 | } | 3132 | } |
3117 | 3133 | ||
3118 | /* | 3134 | /* |
3119 | * If dest_cpu is allowed for this process, migrate the task to it. | 3135 | * sched_exec - execve() is a valuable balancing opportunity, because at |
3120 | * This is accomplished by forcing the cpu_allowed mask to only | 3136 | * this point the task has the smallest effective memory and cache footprint. |
3121 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then | ||
3122 | * the cpu_allowed mask is restored. | ||
3123 | */ | 3137 | */ |
3124 | static void sched_migrate_task(struct task_struct *p, int dest_cpu) | 3138 | void sched_exec(void) |
3125 | { | 3139 | { |
3140 | struct task_struct *p = current; | ||
3126 | struct migration_req req; | 3141 | struct migration_req req; |
3142 | int dest_cpu, this_cpu; | ||
3127 | unsigned long flags; | 3143 | unsigned long flags; |
3128 | struct rq *rq; | 3144 | struct rq *rq; |
3129 | 3145 | ||
3146 | again: | ||
3147 | this_cpu = get_cpu(); | ||
3148 | dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0); | ||
3149 | if (dest_cpu == this_cpu) { | ||
3150 | put_cpu(); | ||
3151 | return; | ||
3152 | } | ||
3153 | |||
3130 | rq = task_rq_lock(p, &flags); | 3154 | rq = task_rq_lock(p, &flags); |
3155 | put_cpu(); | ||
3156 | |||
3157 | /* | ||
3158 | * select_task_rq() can race against ->cpus_allowed | ||
3159 | */ | ||
3131 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) | 3160 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) |
3132 | || unlikely(!cpu_active(dest_cpu))) | 3161 | || unlikely(!cpu_active(dest_cpu))) { |
3133 | goto out; | 3162 | task_rq_unlock(rq, &flags); |
3163 | goto again; | ||
3164 | } | ||
3134 | 3165 | ||
3135 | /* force the process onto the specified CPU */ | 3166 | /* force the process onto the specified CPU */ |
3136 | if (migrate_task(p, dest_cpu, &req)) { | 3167 | if (migrate_task(p, dest_cpu, &req)) { |
@@ -3145,24 +3176,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
3145 | 3176 | ||
3146 | return; | 3177 | return; |
3147 | } | 3178 | } |
3148 | out: | ||
3149 | task_rq_unlock(rq, &flags); | 3179 | task_rq_unlock(rq, &flags); |
3150 | } | 3180 | } |
3151 | 3181 | ||
3152 | /* | 3182 | /* |
3153 | * sched_exec - execve() is a valuable balancing opportunity, because at | ||
3154 | * this point the task has the smallest effective memory and cache footprint. | ||
3155 | */ | ||
3156 | void sched_exec(void) | ||
3157 | { | ||
3158 | int new_cpu, this_cpu = get_cpu(); | ||
3159 | new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0); | ||
3160 | put_cpu(); | ||
3161 | if (new_cpu != this_cpu) | ||
3162 | sched_migrate_task(current, new_cpu); | ||
3163 | } | ||
3164 | |||
3165 | /* | ||
3166 | * pull_task - move a task from a remote runqueue to the local runqueue. | 3183 | * pull_task - move a task from a remote runqueue to the local runqueue. |
3167 | * Both runqueues must be locked. | 3184 | * Both runqueues must be locked. |
3168 | */ | 3185 | */ |
@@ -3172,10 +3189,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, | |||
3172 | deactivate_task(src_rq, p, 0); | 3189 | deactivate_task(src_rq, p, 0); |
3173 | set_task_cpu(p, this_cpu); | 3190 | set_task_cpu(p, this_cpu); |
3174 | activate_task(this_rq, p, 0); | 3191 | activate_task(this_rq, p, 0); |
3175 | /* | ||
3176 | * Note that idle threads have a prio of MAX_PRIO, for this test | ||
3177 | * to be always true for them. | ||
3178 | */ | ||
3179 | check_preempt_curr(this_rq, p, 0); | 3192 | check_preempt_curr(this_rq, p, 0); |
3180 | } | 3193 | } |
3181 | 3194 | ||
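sched_migrate_task() is folded into sched_exec(). The destination cpu is chosen with select_task_rq() and SD_BALANCE_EXEC before the runqueue lock is taken, and because ->cpus_allowed can change in that window the choice is re-validated under the lock; on a race the function now retries from the top instead of silently giving up. The validate-and-retry idiom, as a self-contained sketch:

    /* Illustrative helper showing the retry pattern used by sched_exec(). */
    static int pick_exec_cpu(struct task_struct *p)
    {
        unsigned long flags;
        struct rq *rq;
        int cpu;

    again:
        cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
        rq = task_rq_lock(p, &flags);
        if (!cpumask_test_cpu(cpu, &p->cpus_allowed) || !cpu_active(cpu)) {
            task_rq_unlock(rq, &flags);
            goto again;             /* raced with a cpus_allowed update */
        }
        task_rq_unlock(rq, &flags);
        return cpu;
    }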
@@ -4134,7 +4147,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
4134 | unsigned long flags; | 4147 | unsigned long flags; |
4135 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | 4148 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); |
4136 | 4149 | ||
4137 | cpumask_copy(cpus, cpu_online_mask); | 4150 | cpumask_copy(cpus, cpu_active_mask); |
4138 | 4151 | ||
4139 | /* | 4152 | /* |
4140 | * When power savings policy is enabled for the parent domain, idle | 4153 | * When power savings policy is enabled for the parent domain, idle |
@@ -4207,14 +4220,15 @@ redo: | |||
4207 | 4220 | ||
4208 | if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { | 4221 | if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { |
4209 | 4222 | ||
4210 | spin_lock_irqsave(&busiest->lock, flags); | 4223 | raw_spin_lock_irqsave(&busiest->lock, flags); |
4211 | 4224 | ||
4212 | /* don't kick the migration_thread, if the curr | 4225 | /* don't kick the migration_thread, if the curr |
4213 | * task on busiest cpu can't be moved to this_cpu | 4226 | * task on busiest cpu can't be moved to this_cpu |
4214 | */ | 4227 | */ |
4215 | if (!cpumask_test_cpu(this_cpu, | 4228 | if (!cpumask_test_cpu(this_cpu, |
4216 | &busiest->curr->cpus_allowed)) { | 4229 | &busiest->curr->cpus_allowed)) { |
4217 | spin_unlock_irqrestore(&busiest->lock, flags); | 4230 | raw_spin_unlock_irqrestore(&busiest->lock, |
4231 | flags); | ||
4218 | all_pinned = 1; | 4232 | all_pinned = 1; |
4219 | goto out_one_pinned; | 4233 | goto out_one_pinned; |
4220 | } | 4234 | } |
@@ -4224,7 +4238,7 @@ redo: | |||
4224 | busiest->push_cpu = this_cpu; | 4238 | busiest->push_cpu = this_cpu; |
4225 | active_balance = 1; | 4239 | active_balance = 1; |
4226 | } | 4240 | } |
4227 | spin_unlock_irqrestore(&busiest->lock, flags); | 4241 | raw_spin_unlock_irqrestore(&busiest->lock, flags); |
4228 | if (active_balance) | 4242 | if (active_balance) |
4229 | wake_up_process(busiest->migration_thread); | 4243 | wake_up_process(busiest->migration_thread); |
4230 | 4244 | ||
@@ -4297,7 +4311,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) | |||
4297 | int all_pinned = 0; | 4311 | int all_pinned = 0; |
4298 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | 4312 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); |
4299 | 4313 | ||
4300 | cpumask_copy(cpus, cpu_online_mask); | 4314 | cpumask_copy(cpus, cpu_active_mask); |
4301 | 4315 | ||
4302 | /* | 4316 | /* |
4303 | * When power savings policy is enabled for the parent domain, idle | 4317 | * When power savings policy is enabled for the parent domain, idle |
@@ -4406,10 +4420,10 @@ redo: | |||
4406 | /* | 4420 | /* |
4407 | * Should not call ttwu while holding a rq->lock | 4421 | * Should not call ttwu while holding a rq->lock |
4408 | */ | 4422 | */ |
4409 | spin_unlock(&this_rq->lock); | 4423 | raw_spin_unlock(&this_rq->lock); |
4410 | if (active_balance) | 4424 | if (active_balance) |
4411 | wake_up_process(busiest->migration_thread); | 4425 | wake_up_process(busiest->migration_thread); |
4412 | spin_lock(&this_rq->lock); | 4426 | raw_spin_lock(&this_rq->lock); |
4413 | 4427 | ||
4414 | } else | 4428 | } else |
4415 | sd->nr_balance_failed = 0; | 4429 | sd->nr_balance_failed = 0; |
@@ -4694,7 +4708,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
4694 | cpumask_set_cpu(cpu, nohz.cpu_mask); | 4708 | cpumask_set_cpu(cpu, nohz.cpu_mask); |
4695 | 4709 | ||
4696 | /* time for ilb owner also to sleep */ | 4710 | /* time for ilb owner also to sleep */ |
4697 | if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { | 4711 | if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) { |
4698 | if (atomic_read(&nohz.load_balancer) == cpu) | 4712 | if (atomic_read(&nohz.load_balancer) == cpu) |
4699 | atomic_set(&nohz.load_balancer, -1); | 4713 | atomic_set(&nohz.load_balancer, -1); |
4700 | return 0; | 4714 | return 0; |
@@ -5278,11 +5292,11 @@ void scheduler_tick(void) | |||
5278 | 5292 | ||
5279 | sched_clock_tick(); | 5293 | sched_clock_tick(); |
5280 | 5294 | ||
5281 | spin_lock(&rq->lock); | 5295 | raw_spin_lock(&rq->lock); |
5282 | update_rq_clock(rq); | 5296 | update_rq_clock(rq); |
5283 | update_cpu_load(rq); | 5297 | update_cpu_load(rq); |
5284 | curr->sched_class->task_tick(rq, curr, 0); | 5298 | curr->sched_class->task_tick(rq, curr, 0); |
5285 | spin_unlock(&rq->lock); | 5299 | raw_spin_unlock(&rq->lock); |
5286 | 5300 | ||
5287 | perf_event_task_tick(curr, cpu); | 5301 | perf_event_task_tick(curr, cpu); |
5288 | 5302 | ||
@@ -5396,13 +5410,14 @@ static inline void schedule_debug(struct task_struct *prev) | |||
5396 | #endif | 5410 | #endif |
5397 | } | 5411 | } |
5398 | 5412 | ||
5399 | static void put_prev_task(struct rq *rq, struct task_struct *p) | 5413 | static void put_prev_task(struct rq *rq, struct task_struct *prev) |
5400 | { | 5414 | { |
5401 | u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime; | 5415 | if (prev->state == TASK_RUNNING) { |
5416 | u64 runtime = prev->se.sum_exec_runtime; | ||
5402 | 5417 | ||
5403 | update_avg(&p->se.avg_running, runtime); | 5418 | runtime -= prev->se.prev_sum_exec_runtime; |
5419 | runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); | ||
5404 | 5420 | ||
5405 | if (p->state == TASK_RUNNING) { | ||
5406 | /* | 5421 | /* |
5407 | * In order to avoid avg_overlap growing stale when we are | 5422 | * In order to avoid avg_overlap growing stale when we are |
5408 | * indeed overlapping and hence not getting put to sleep, grow | 5423 | * indeed overlapping and hence not getting put to sleep, grow |
@@ -5412,12 +5427,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p) | |||
5412 | * correlates to the amount of cache footprint a task can | 5427 | * correlates to the amount of cache footprint a task can |
5413 | * build up. | 5428 | * build up. |
5414 | */ | 5429 | */ |
5415 | runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); | 5430 | update_avg(&prev->se.avg_overlap, runtime); |
5416 | update_avg(&p->se.avg_overlap, runtime); | ||
5417 | } else { | ||
5418 | update_avg(&p->se.avg_running, 0); | ||
5419 | } | 5431 | } |
5420 | p->sched_class->put_prev_task(rq, p); | 5432 | prev->sched_class->put_prev_task(rq, prev); |
5421 | } | 5433 | } |
5422 | 5434 | ||
5423 | /* | 5435 | /* |
@@ -5478,7 +5490,7 @@ need_resched_nonpreemptible: | |||
5478 | if (sched_feat(HRTICK)) | 5490 | if (sched_feat(HRTICK)) |
5479 | hrtick_clear(rq); | 5491 | hrtick_clear(rq); |
5480 | 5492 | ||
5481 | spin_lock_irq(&rq->lock); | 5493 | raw_spin_lock_irq(&rq->lock); |
5482 | update_rq_clock(rq); | 5494 | update_rq_clock(rq); |
5483 | clear_tsk_need_resched(prev); | 5495 | clear_tsk_need_resched(prev); |
5484 | 5496 | ||
@@ -5514,7 +5526,7 @@ need_resched_nonpreemptible: | |||
5514 | cpu = smp_processor_id(); | 5526 | cpu = smp_processor_id(); |
5515 | rq = cpu_rq(cpu); | 5527 | rq = cpu_rq(cpu); |
5516 | } else | 5528 | } else |
5517 | spin_unlock_irq(&rq->lock); | 5529 | raw_spin_unlock_irq(&rq->lock); |
5518 | 5530 | ||
5519 | post_schedule(rq); | 5531 | post_schedule(rq); |
5520 | 5532 | ||
@@ -5931,14 +5943,15 @@ EXPORT_SYMBOL(wait_for_completion_killable); | |||
5931 | */ | 5943 | */ |
5932 | bool try_wait_for_completion(struct completion *x) | 5944 | bool try_wait_for_completion(struct completion *x) |
5933 | { | 5945 | { |
5946 | unsigned long flags; | ||
5934 | int ret = 1; | 5947 | int ret = 1; |
5935 | 5948 | ||
5936 | spin_lock_irq(&x->wait.lock); | 5949 | spin_lock_irqsave(&x->wait.lock, flags); |
5937 | if (!x->done) | 5950 | if (!x->done) |
5938 | ret = 0; | 5951 | ret = 0; |
5939 | else | 5952 | else |
5940 | x->done--; | 5953 | x->done--; |
5941 | spin_unlock_irq(&x->wait.lock); | 5954 | spin_unlock_irqrestore(&x->wait.lock, flags); |
5942 | return ret; | 5955 | return ret; |
5943 | } | 5956 | } |
5944 | EXPORT_SYMBOL(try_wait_for_completion); | 5957 | EXPORT_SYMBOL(try_wait_for_completion); |
@@ -5953,12 +5966,13 @@ EXPORT_SYMBOL(try_wait_for_completion); | |||
5953 | */ | 5966 | */ |
5954 | bool completion_done(struct completion *x) | 5967 | bool completion_done(struct completion *x) |
5955 | { | 5968 | { |
5969 | unsigned long flags; | ||
5956 | int ret = 1; | 5970 | int ret = 1; |
5957 | 5971 | ||
5958 | spin_lock_irq(&x->wait.lock); | 5972 | spin_lock_irqsave(&x->wait.lock, flags); |
5959 | if (!x->done) | 5973 | if (!x->done) |
5960 | ret = 0; | 5974 | ret = 0; |
5961 | spin_unlock_irq(&x->wait.lock); | 5975 | spin_unlock_irqrestore(&x->wait.lock, flags); |
5962 | return ret; | 5976 | return ret; |
5963 | } | 5977 | } |
5964 | EXPORT_SYMBOL(completion_done); | 5978 | EXPORT_SYMBOL(completion_done); |
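try_wait_for_completion() and completion_done() switch from spin_lock_irq() to spin_lock_irqsave(), so they no longer unconditionally re-enable interrupts on unlock and become safe to call from contexts where interrupts are already disabled. A hypothetical caller illustrating why that matters:

    #include <linux/completion.h>
    #include <linux/interrupt.h>

    /* Hypothetical handler: interrupts are off here, which the irqsave
     * variants now tolerate. */
    static irqreturn_t example_irq_handler(int irq, void *dev_id)
    {
        struct completion *done = dev_id;

        if (try_wait_for_completion(done))
            return IRQ_HANDLED;     /* consumed a pending completion */

        return IRQ_NONE;
    }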
@@ -6343,7 +6357,7 @@ recheck: | |||
6343 | * make sure no PI-waiters arrive (or leave) while we are | 6357 | * make sure no PI-waiters arrive (or leave) while we are |
6344 | * changing the priority of the task: | 6358 | * changing the priority of the task: |
6345 | */ | 6359 | */ |
6346 | spin_lock_irqsave(&p->pi_lock, flags); | 6360 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
6347 | /* | 6361 | /* |
6348 | * To be able to change p->policy safely, the apropriate | 6362 | * To be able to change p->policy safely, the apropriate |
6349 | * runqueue lock must be held. | 6363 | * runqueue lock must be held. |
@@ -6353,7 +6367,7 @@ recheck: | |||
6353 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { | 6367 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
6354 | policy = oldpolicy = -1; | 6368 | policy = oldpolicy = -1; |
6355 | __task_rq_unlock(rq); | 6369 | __task_rq_unlock(rq); |
6356 | spin_unlock_irqrestore(&p->pi_lock, flags); | 6370 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
6357 | goto recheck; | 6371 | goto recheck; |
6358 | } | 6372 | } |
6359 | update_rq_clock(rq); | 6373 | update_rq_clock(rq); |
@@ -6377,7 +6391,7 @@ recheck: | |||
6377 | check_class_changed(rq, p, prev_class, oldprio, running); | 6391 | check_class_changed(rq, p, prev_class, oldprio, running); |
6378 | } | 6392 | } |
6379 | __task_rq_unlock(rq); | 6393 | __task_rq_unlock(rq); |
6380 | spin_unlock_irqrestore(&p->pi_lock, flags); | 6394 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
6381 | 6395 | ||
6382 | rt_mutex_adjust_pi(p); | 6396 | rt_mutex_adjust_pi(p); |
6383 | 6397 | ||
@@ -6477,7 +6491,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) | |||
6477 | return -EINVAL; | 6491 | return -EINVAL; |
6478 | 6492 | ||
6479 | retval = -ESRCH; | 6493 | retval = -ESRCH; |
6480 | read_lock(&tasklist_lock); | 6494 | rcu_read_lock(); |
6481 | p = find_process_by_pid(pid); | 6495 | p = find_process_by_pid(pid); |
6482 | if (p) { | 6496 | if (p) { |
6483 | retval = security_task_getscheduler(p); | 6497 | retval = security_task_getscheduler(p); |
@@ -6485,7 +6499,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) | |||
6485 | retval = p->policy | 6499 | retval = p->policy |
6486 | | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); | 6500 | | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); |
6487 | } | 6501 | } |
6488 | read_unlock(&tasklist_lock); | 6502 | rcu_read_unlock(); |
6489 | return retval; | 6503 | return retval; |
6490 | } | 6504 | } |
6491 | 6505 | ||
@@ -6503,7 +6517,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) | |||
6503 | if (!param || pid < 0) | 6517 | if (!param || pid < 0) |
6504 | return -EINVAL; | 6518 | return -EINVAL; |
6505 | 6519 | ||
6506 | read_lock(&tasklist_lock); | 6520 | rcu_read_lock(); |
6507 | p = find_process_by_pid(pid); | 6521 | p = find_process_by_pid(pid); |
6508 | retval = -ESRCH; | 6522 | retval = -ESRCH; |
6509 | if (!p) | 6523 | if (!p) |
@@ -6514,7 +6528,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) | |||
6514 | goto out_unlock; | 6528 | goto out_unlock; |
6515 | 6529 | ||
6516 | lp.sched_priority = p->rt_priority; | 6530 | lp.sched_priority = p->rt_priority; |
6517 | read_unlock(&tasklist_lock); | 6531 | rcu_read_unlock(); |
6518 | 6532 | ||
6519 | /* | 6533 | /* |
6520 | * This one might sleep, we cannot do it with a spinlock held ... | 6534 | * This one might sleep, we cannot do it with a spinlock held ... |
@@ -6524,7 +6538,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) | |||
6524 | return retval; | 6538 | return retval; |
6525 | 6539 | ||
6526 | out_unlock: | 6540 | out_unlock: |
6527 | read_unlock(&tasklist_lock); | 6541 | rcu_read_unlock(); |
6528 | return retval; | 6542 | return retval; |
6529 | } | 6543 | } |
6530 | 6544 | ||
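sched_getscheduler() and sched_getparam() (and the affinity and rr_get_interval paths below) replace read_lock(&tasklist_lock) with rcu_read_lock() around find_process_by_pid(): pid-to-task lookup is RCU-safe, and where the task must outlive the critical section a reference is taken with get_task_struct(), as sched_setaffinity() now does. The general pattern, as a sketch (find_task_by_vpid() is what find_process_by_pid() is built on):

    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    /* Illustrative lookup that pins the task beyond the RCU section. */
    static struct task_struct *get_task_by_pid(pid_t pid)
    {
        struct task_struct *p;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p)
            get_task_struct(p);
        rcu_read_unlock();

        return p;
    }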
@@ -6535,22 +6549,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) | |||
6535 | int retval; | 6549 | int retval; |
6536 | 6550 | ||
6537 | get_online_cpus(); | 6551 | get_online_cpus(); |
6538 | read_lock(&tasklist_lock); | 6552 | rcu_read_lock(); |
6539 | 6553 | ||
6540 | p = find_process_by_pid(pid); | 6554 | p = find_process_by_pid(pid); |
6541 | if (!p) { | 6555 | if (!p) { |
6542 | read_unlock(&tasklist_lock); | 6556 | rcu_read_unlock(); |
6543 | put_online_cpus(); | 6557 | put_online_cpus(); |
6544 | return -ESRCH; | 6558 | return -ESRCH; |
6545 | } | 6559 | } |
6546 | 6560 | ||
6547 | /* | 6561 | /* Prevent p going away */ |
6548 | * It is not safe to call set_cpus_allowed with the | ||
6549 | * tasklist_lock held. We will bump the task_struct's | ||
6550 | * usage count and then drop tasklist_lock. | ||
6551 | */ | ||
6552 | get_task_struct(p); | 6562 | get_task_struct(p); |
6553 | read_unlock(&tasklist_lock); | 6563 | rcu_read_unlock(); |
6554 | 6564 | ||
6555 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { | 6565 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { |
6556 | retval = -ENOMEM; | 6566 | retval = -ENOMEM; |
@@ -6631,10 +6641,12 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, | |||
6631 | long sched_getaffinity(pid_t pid, struct cpumask *mask) | 6641 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
6632 | { | 6642 | { |
6633 | struct task_struct *p; | 6643 | struct task_struct *p; |
6644 | unsigned long flags; | ||
6645 | struct rq *rq; | ||
6634 | int retval; | 6646 | int retval; |
6635 | 6647 | ||
6636 | get_online_cpus(); | 6648 | get_online_cpus(); |
6637 | read_lock(&tasklist_lock); | 6649 | rcu_read_lock(); |
6638 | 6650 | ||
6639 | retval = -ESRCH; | 6651 | retval = -ESRCH; |
6640 | p = find_process_by_pid(pid); | 6652 | p = find_process_by_pid(pid); |
@@ -6645,10 +6657,12 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) | |||
6645 | if (retval) | 6657 | if (retval) |
6646 | goto out_unlock; | 6658 | goto out_unlock; |
6647 | 6659 | ||
6660 | rq = task_rq_lock(p, &flags); | ||
6648 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); | 6661 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
6662 | task_rq_unlock(rq, &flags); | ||
6649 | 6663 | ||
6650 | out_unlock: | 6664 | out_unlock: |
6651 | read_unlock(&tasklist_lock); | 6665 | rcu_read_unlock(); |
6652 | put_online_cpus(); | 6666 | put_online_cpus(); |
6653 | 6667 | ||
6654 | return retval; | 6668 | return retval; |
@@ -6703,7 +6717,7 @@ SYSCALL_DEFINE0(sched_yield) | |||
6703 | */ | 6717 | */ |
6704 | __release(rq->lock); | 6718 | __release(rq->lock); |
6705 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); | 6719 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
6706 | _raw_spin_unlock(&rq->lock); | 6720 | do_raw_spin_unlock(&rq->lock); |
6707 | preempt_enable_no_resched(); | 6721 | preempt_enable_no_resched(); |
6708 | 6722 | ||
6709 | schedule(); | 6723 | schedule(); |
@@ -6883,6 +6897,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
6883 | { | 6897 | { |
6884 | struct task_struct *p; | 6898 | struct task_struct *p; |
6885 | unsigned int time_slice; | 6899 | unsigned int time_slice; |
6900 | unsigned long flags; | ||
6901 | struct rq *rq; | ||
6886 | int retval; | 6902 | int retval; |
6887 | struct timespec t; | 6903 | struct timespec t; |
6888 | 6904 | ||
@@ -6890,7 +6906,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
6890 | return -EINVAL; | 6906 | return -EINVAL; |
6891 | 6907 | ||
6892 | retval = -ESRCH; | 6908 | retval = -ESRCH; |
6893 | read_lock(&tasklist_lock); | 6909 | rcu_read_lock(); |
6894 | p = find_process_by_pid(pid); | 6910 | p = find_process_by_pid(pid); |
6895 | if (!p) | 6911 | if (!p) |
6896 | goto out_unlock; | 6912 | goto out_unlock; |
@@ -6899,15 +6915,17 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
6899 | if (retval) | 6915 | if (retval) |
6900 | goto out_unlock; | 6916 | goto out_unlock; |
6901 | 6917 | ||
6902 | time_slice = p->sched_class->get_rr_interval(p); | 6918 | rq = task_rq_lock(p, &flags); |
6919 | time_slice = p->sched_class->get_rr_interval(rq, p); | ||
6920 | task_rq_unlock(rq, &flags); | ||
6903 | 6921 | ||
6904 | read_unlock(&tasklist_lock); | 6922 | rcu_read_unlock(); |
6905 | jiffies_to_timespec(time_slice, &t); | 6923 | jiffies_to_timespec(time_slice, &t); |
6906 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; | 6924 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; |
6907 | return retval; | 6925 | return retval; |
6908 | 6926 | ||
6909 | out_unlock: | 6927 | out_unlock: |
6910 | read_unlock(&tasklist_lock); | 6928 | rcu_read_unlock(); |
6911 | return retval; | 6929 | return retval; |
6912 | } | 6930 | } |
6913 | 6931 | ||
@@ -6995,12 +7013,12 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
6995 | struct rq *rq = cpu_rq(cpu); | 7013 | struct rq *rq = cpu_rq(cpu); |
6996 | unsigned long flags; | 7014 | unsigned long flags; |
6997 | 7015 | ||
6998 | spin_lock_irqsave(&rq->lock, flags); | 7016 | raw_spin_lock_irqsave(&rq->lock, flags); |
6999 | 7017 | ||
7000 | __sched_fork(idle); | 7018 | __sched_fork(idle); |
7019 | idle->state = TASK_RUNNING; | ||
7001 | idle->se.exec_start = sched_clock(); | 7020 | idle->se.exec_start = sched_clock(); |
7002 | 7021 | ||
7003 | idle->prio = idle->normal_prio = MAX_PRIO; | ||
7004 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); | 7022 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
7005 | __set_task_cpu(idle, cpu); | 7023 | __set_task_cpu(idle, cpu); |
7006 | 7024 | ||
@@ -7008,7 +7026,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
7008 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 7026 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
7009 | idle->oncpu = 1; | 7027 | idle->oncpu = 1; |
7010 | #endif | 7028 | #endif |
7011 | spin_unlock_irqrestore(&rq->lock, flags); | 7029 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7012 | 7030 | ||
7013 | /* Set the preempt count _outside_ the spinlocks! */ | 7031 | /* Set the preempt count _outside_ the spinlocks! */ |
7014 | #if defined(CONFIG_PREEMPT) | 7032 | #if defined(CONFIG_PREEMPT) |
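Note: most of the mechanical churn in this file, as in the init_idle() hunk above, swaps spinlock_t for raw_spinlock_t on rq->lock and the matching spin_* calls for raw_spin_* ones, so the runqueue lock stays a true spinning lock even in configurations where ordinary spinlocks may become sleeping locks. A hedged sketch of the converted API, using an illustrative structure rather than the real runqueue:

	#include <linux/spinlock.h>

	/* Illustrative only: a raw spinlock declared, initialized and used
	 * the way rq->lock is after the conversion above. */
	struct demo_rq {
		raw_spinlock_t	lock;
		unsigned long	nr_running;
	};

	static void demo_rq_init(struct demo_rq *rq)
	{
		raw_spin_lock_init(&rq->lock);
		rq->nr_running = 0;
	}

	static void demo_rq_inc(struct demo_rq *rq)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);	/* IRQs off, spin */
		rq->nr_running++;
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}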
@@ -7041,22 +7059,43 @@ cpumask_var_t nohz_cpu_mask; | |||
7041 | * | 7059 | * |
7042 | * This idea comes from the SD scheduler of Con Kolivas: | 7060 | * This idea comes from the SD scheduler of Con Kolivas: |
7043 | */ | 7061 | */ |
7044 | static inline void sched_init_granularity(void) | 7062 | static int get_update_sysctl_factor(void) |
7045 | { | 7063 | { |
7046 | unsigned int factor = 1 + ilog2(num_online_cpus()); | 7064 | unsigned int cpus = min_t(int, num_online_cpus(), 8); |
7047 | const unsigned long limit = 200000000; | 7065 | unsigned int factor; |
7048 | 7066 | ||
7049 | sysctl_sched_min_granularity *= factor; | 7067 | switch (sysctl_sched_tunable_scaling) { |
7050 | if (sysctl_sched_min_granularity > limit) | 7068 | case SCHED_TUNABLESCALING_NONE: |
7051 | sysctl_sched_min_granularity = limit; | 7069 | factor = 1; |
7070 | break; | ||
7071 | case SCHED_TUNABLESCALING_LINEAR: | ||
7072 | factor = cpus; | ||
7073 | break; | ||
7074 | case SCHED_TUNABLESCALING_LOG: | ||
7075 | default: | ||
7076 | factor = 1 + ilog2(cpus); | ||
7077 | break; | ||
7078 | } | ||
7052 | 7079 | ||
7053 | sysctl_sched_latency *= factor; | 7080 | return factor; |
7054 | if (sysctl_sched_latency > limit) | 7081 | } |
7055 | sysctl_sched_latency = limit; | ||
7056 | 7082 | ||
7057 | sysctl_sched_wakeup_granularity *= factor; | 7083 | static void update_sysctl(void) |
7084 | { | ||
7085 | unsigned int factor = get_update_sysctl_factor(); | ||
7086 | |||
7087 | #define SET_SYSCTL(name) \ | ||
7088 | (sysctl_##name = (factor) * normalized_sysctl_##name) | ||
7089 | SET_SYSCTL(sched_min_granularity); | ||
7090 | SET_SYSCTL(sched_latency); | ||
7091 | SET_SYSCTL(sched_wakeup_granularity); | ||
7092 | SET_SYSCTL(sched_shares_ratelimit); | ||
7093 | #undef SET_SYSCTL | ||
7094 | } | ||
7058 | 7095 | ||
7059 | sysctl_sched_shares_ratelimit *= factor; | 7096 | static inline void sched_init_granularity(void) |
7097 | { | ||
7098 | update_sysctl(); | ||
7060 | } | 7099 | } |
7061 | 7100 | ||
7062 | #ifdef CONFIG_SMP | 7101 | #ifdef CONFIG_SMP |
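Note: the hunk above replaces the old "multiply by factor, clamp at 200ms" granularity setup with get_update_sysctl_factor()/update_sysctl(): each tunable keeps a normalized_sysctl_* baseline and its effective value is that baseline times a factor selected by sysctl_sched_tunable_scaling. A small stand-alone illustration of the three scaling modes (the CPU count is an example value, not a kernel default):

	/* Stand-alone illustration of the factor selection added above.
	 * ilog2() is open-coded; the 8-CPU cap mirrors the patch. */
	#include <stdio.h>

	static unsigned int ilog2_u(unsigned int x)
	{
		unsigned int r = 0;

		while (x >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned int online = 16;
		unsigned int cpus = online < 8 ? online : 8;	/* min(online, 8) */

		printf("NONE   -> factor %u\n", 1u);			/* always 1   */
		printf("LINEAR -> factor %u\n", cpus);			/* 8          */
		printf("LOG    -> factor %u\n", 1 + ilog2_u(cpus));	/* 1 + 3 = 4  */
		return 0;
	}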
@@ -7092,8 +7131,24 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | |||
7092 | struct rq *rq; | 7131 | struct rq *rq; |
7093 | int ret = 0; | 7132 | int ret = 0; |
7094 | 7133 | ||
7134 | /* | ||
7135 | * Since we rely on wake-ups to migrate sleeping tasks, don't change | ||
7136 | * the ->cpus_allowed mask from under waking tasks, which would be | ||
7137 | * possible when we change rq->lock in ttwu(), so synchronize against | ||
7138 | * TASK_WAKING to avoid that. | ||
7139 | */ | ||
7140 | again: | ||
7141 | while (p->state == TASK_WAKING) | ||
7142 | cpu_relax(); | ||
7143 | |||
7095 | rq = task_rq_lock(p, &flags); | 7144 | rq = task_rq_lock(p, &flags); |
7096 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { | 7145 | |
7146 | if (p->state == TASK_WAKING) { | ||
7147 | task_rq_unlock(rq, &flags); | ||
7148 | goto again; | ||
7149 | } | ||
7150 | |||
7151 | if (!cpumask_intersects(new_mask, cpu_active_mask)) { | ||
7097 | ret = -EINVAL; | 7152 | ret = -EINVAL; |
7098 | goto out; | 7153 | goto out; |
7099 | } | 7154 | } |
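Note: the new TASK_WAKING handling in set_cpus_allowed_ptr() above boils down to "wait out the transient state without the lock, then take the rq lock and re-check". A condensed, hypothetical helper capturing just that retry loop (the helper itself is not in the patch; task_rq_lock()/task_rq_unlock() are the sched.c locking helpers used in the hunk):

	/*
	 * Hypothetical condensation of the retry loop above: spin while the
	 * task is in the transient TASK_WAKING state, then take its rq lock
	 * and re-check, since the state can flip back between the two steps.
	 */
	static struct rq *lock_rq_once_settled(struct task_struct *p,
					       unsigned long *flags)
	{
		struct rq *rq;

	again:
		while (p->state == TASK_WAKING)
			cpu_relax();			/* TASK_WAKING is short-lived */

		rq = task_rq_lock(p, flags);
		if (p->state == TASK_WAKING) {		/* raced with another wakeup */
			task_rq_unlock(rq, flags);
			goto again;
		}

		return rq;	/* rq->lock held; safe to touch ->cpus_allowed */
	}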
@@ -7115,7 +7170,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | |||
7115 | if (cpumask_test_cpu(task_cpu(p), new_mask)) | 7170 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
7116 | goto out; | 7171 | goto out; |
7117 | 7172 | ||
7118 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { | 7173 | if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) { |
7119 | /* Need help from migration thread: drop lock and wait. */ | 7174 | /* Need help from migration thread: drop lock and wait. */ |
7120 | struct task_struct *mt = rq->migration_thread; | 7175 | struct task_struct *mt = rq->migration_thread; |
7121 | 7176 | ||
@@ -7148,7 +7203,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); | |||
7148 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | 7203 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
7149 | { | 7204 | { |
7150 | struct rq *rq_dest, *rq_src; | 7205 | struct rq *rq_dest, *rq_src; |
7151 | int ret = 0, on_rq; | 7206 | int ret = 0; |
7152 | 7207 | ||
7153 | if (unlikely(!cpu_active(dest_cpu))) | 7208 | if (unlikely(!cpu_active(dest_cpu))) |
7154 | return ret; | 7209 | return ret; |
@@ -7164,12 +7219,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
7164 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) | 7219 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
7165 | goto fail; | 7220 | goto fail; |
7166 | 7221 | ||
7167 | on_rq = p->se.on_rq; | 7222 | /* |
7168 | if (on_rq) | 7223 | * If we're not on a rq, the next wake-up will ensure we're |
7224 | * placed properly. | ||
7225 | */ | ||
7226 | if (p->se.on_rq) { | ||
7169 | deactivate_task(rq_src, p, 0); | 7227 | deactivate_task(rq_src, p, 0); |
7170 | 7228 | set_task_cpu(p, dest_cpu); | |
7171 | set_task_cpu(p, dest_cpu); | ||
7172 | if (on_rq) { | ||
7173 | activate_task(rq_dest, p, 0); | 7229 | activate_task(rq_dest, p, 0); |
7174 | check_preempt_curr(rq_dest, p, 0); | 7230 | check_preempt_curr(rq_dest, p, 0); |
7175 | } | 7231 | } |
@@ -7204,10 +7260,10 @@ static int migration_thread(void *data) | |||
7204 | struct migration_req *req; | 7260 | struct migration_req *req; |
7205 | struct list_head *head; | 7261 | struct list_head *head; |
7206 | 7262 | ||
7207 | spin_lock_irq(&rq->lock); | 7263 | raw_spin_lock_irq(&rq->lock); |
7208 | 7264 | ||
7209 | if (cpu_is_offline(cpu)) { | 7265 | if (cpu_is_offline(cpu)) { |
7210 | spin_unlock_irq(&rq->lock); | 7266 | raw_spin_unlock_irq(&rq->lock); |
7211 | break; | 7267 | break; |
7212 | } | 7268 | } |
7213 | 7269 | ||
@@ -7219,7 +7275,7 @@ static int migration_thread(void *data) | |||
7219 | head = &rq->migration_queue; | 7275 | head = &rq->migration_queue; |
7220 | 7276 | ||
7221 | if (list_empty(head)) { | 7277 | if (list_empty(head)) { |
7222 | spin_unlock_irq(&rq->lock); | 7278 | raw_spin_unlock_irq(&rq->lock); |
7223 | schedule(); | 7279 | schedule(); |
7224 | set_current_state(TASK_INTERRUPTIBLE); | 7280 | set_current_state(TASK_INTERRUPTIBLE); |
7225 | continue; | 7281 | continue; |
@@ -7228,14 +7284,14 @@ static int migration_thread(void *data) | |||
7228 | list_del_init(head->next); | 7284 | list_del_init(head->next); |
7229 | 7285 | ||
7230 | if (req->task != NULL) { | 7286 | if (req->task != NULL) { |
7231 | spin_unlock(&rq->lock); | 7287 | raw_spin_unlock(&rq->lock); |
7232 | __migrate_task(req->task, cpu, req->dest_cpu); | 7288 | __migrate_task(req->task, cpu, req->dest_cpu); |
7233 | } else if (likely(cpu == (badcpu = smp_processor_id()))) { | 7289 | } else if (likely(cpu == (badcpu = smp_processor_id()))) { |
7234 | req->dest_cpu = RCU_MIGRATION_GOT_QS; | 7290 | req->dest_cpu = RCU_MIGRATION_GOT_QS; |
7235 | spin_unlock(&rq->lock); | 7291 | raw_spin_unlock(&rq->lock); |
7236 | } else { | 7292 | } else { |
7237 | req->dest_cpu = RCU_MIGRATION_MUST_SYNC; | 7293 | req->dest_cpu = RCU_MIGRATION_MUST_SYNC; |
7238 | spin_unlock(&rq->lock); | 7294 | raw_spin_unlock(&rq->lock); |
7239 | WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); | 7295 | WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); |
7240 | } | 7296 | } |
7241 | local_irq_enable(); | 7297 | local_irq_enable(); |
@@ -7265,37 +7321,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
7265 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 7321 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
7266 | { | 7322 | { |
7267 | int dest_cpu; | 7323 | int dest_cpu; |
7268 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); | ||
7269 | 7324 | ||
7270 | again: | 7325 | again: |
7271 | /* Look for allowed, online CPU in same node. */ | 7326 | dest_cpu = select_fallback_rq(dead_cpu, p); |
7272 | for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) | ||
7273 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) | ||
7274 | goto move; | ||
7275 | |||
7276 | /* Any allowed, online CPU? */ | ||
7277 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); | ||
7278 | if (dest_cpu < nr_cpu_ids) | ||
7279 | goto move; | ||
7280 | 7327 | ||
7281 | /* No more Mr. Nice Guy. */ | ||
7282 | if (dest_cpu >= nr_cpu_ids) { | ||
7283 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); | ||
7284 | dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); | ||
7285 | |||
7286 | /* | ||
7287 | * Don't tell them about moving exiting tasks or | ||
7288 | * kernel threads (both mm NULL), since they never | ||
7289 | * leave kernel. | ||
7290 | */ | ||
7291 | if (p->mm && printk_ratelimit()) { | ||
7292 | printk(KERN_INFO "process %d (%s) no " | ||
7293 | "longer affine to cpu%d\n", | ||
7294 | task_pid_nr(p), p->comm, dead_cpu); | ||
7295 | } | ||
7296 | } | ||
7297 | |||
7298 | move: | ||
7299 | /* It can have affinity changed while we were choosing. */ | 7328 | /* It can have affinity changed while we were choosing. */ |
7300 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) | 7329 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) |
7301 | goto again; | 7330 | goto again; |
@@ -7310,7 +7339,7 @@ move: | |||
7310 | */ | 7339 | */ |
7311 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 7340 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
7312 | { | 7341 | { |
7313 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); | 7342 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); |
7314 | unsigned long flags; | 7343 | unsigned long flags; |
7315 | 7344 | ||
7316 | local_irq_save(flags); | 7345 | local_irq_save(flags); |
@@ -7358,14 +7387,14 @@ void sched_idle_next(void) | |||
7358 | * Strictly not necessary since rest of the CPUs are stopped by now | 7387 | * Strictly not necessary since rest of the CPUs are stopped by now |
7359 | * and interrupts disabled on the current cpu. | 7388 | * and interrupts disabled on the current cpu. |
7360 | */ | 7389 | */ |
7361 | spin_lock_irqsave(&rq->lock, flags); | 7390 | raw_spin_lock_irqsave(&rq->lock, flags); |
7362 | 7391 | ||
7363 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); | 7392 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); |
7364 | 7393 | ||
7365 | update_rq_clock(rq); | 7394 | update_rq_clock(rq); |
7366 | activate_task(rq, p, 0); | 7395 | activate_task(rq, p, 0); |
7367 | 7396 | ||
7368 | spin_unlock_irqrestore(&rq->lock, flags); | 7397 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7369 | } | 7398 | } |
7370 | 7399 | ||
7371 | /* | 7400 | /* |
@@ -7401,9 +7430,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) | |||
7401 | * that's OK. No task can be added to this CPU, so iteration is | 7430 | * that's OK. No task can be added to this CPU, so iteration is |
7402 | * fine. | 7431 | * fine. |
7403 | */ | 7432 | */ |
7404 | spin_unlock_irq(&rq->lock); | 7433 | raw_spin_unlock_irq(&rq->lock); |
7405 | move_task_off_dead_cpu(dead_cpu, p); | 7434 | move_task_off_dead_cpu(dead_cpu, p); |
7406 | spin_lock_irq(&rq->lock); | 7435 | raw_spin_lock_irq(&rq->lock); |
7407 | 7436 | ||
7408 | put_task_struct(p); | 7437 | put_task_struct(p); |
7409 | } | 7438 | } |
@@ -7563,7 +7592,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu) | |||
7563 | static struct ctl_table_header *sd_sysctl_header; | 7592 | static struct ctl_table_header *sd_sysctl_header; |
7564 | static void register_sched_domain_sysctl(void) | 7593 | static void register_sched_domain_sysctl(void) |
7565 | { | 7594 | { |
7566 | int i, cpu_num = num_online_cpus(); | 7595 | int i, cpu_num = num_possible_cpus(); |
7567 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); | 7596 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); |
7568 | char buf[32]; | 7597 | char buf[32]; |
7569 | 7598 | ||
@@ -7573,7 +7602,7 @@ static void register_sched_domain_sysctl(void) | |||
7573 | if (entry == NULL) | 7602 | if (entry == NULL) |
7574 | return; | 7603 | return; |
7575 | 7604 | ||
7576 | for_each_online_cpu(i) { | 7605 | for_each_possible_cpu(i) { |
7577 | snprintf(buf, 32, "cpu%d", i); | 7606 | snprintf(buf, 32, "cpu%d", i); |
7578 | entry->procname = kstrdup(buf, GFP_KERNEL); | 7607 | entry->procname = kstrdup(buf, GFP_KERNEL); |
7579 | entry->mode = 0555; | 7608 | entry->mode = 0555; |
@@ -7669,13 +7698,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7669 | 7698 | ||
7670 | /* Update our root-domain */ | 7699 | /* Update our root-domain */ |
7671 | rq = cpu_rq(cpu); | 7700 | rq = cpu_rq(cpu); |
7672 | spin_lock_irqsave(&rq->lock, flags); | 7701 | raw_spin_lock_irqsave(&rq->lock, flags); |
7673 | if (rq->rd) { | 7702 | if (rq->rd) { |
7674 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | 7703 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
7675 | 7704 | ||
7676 | set_rq_online(rq); | 7705 | set_rq_online(rq); |
7677 | } | 7706 | } |
7678 | spin_unlock_irqrestore(&rq->lock, flags); | 7707 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7679 | break; | 7708 | break; |
7680 | 7709 | ||
7681 | #ifdef CONFIG_HOTPLUG_CPU | 7710 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -7700,14 +7729,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7700 | put_task_struct(rq->migration_thread); | 7729 | put_task_struct(rq->migration_thread); |
7701 | rq->migration_thread = NULL; | 7730 | rq->migration_thread = NULL; |
7702 | /* Idle task back to normal (off runqueue, low prio) */ | 7731 | /* Idle task back to normal (off runqueue, low prio) */ |
7703 | spin_lock_irq(&rq->lock); | 7732 | raw_spin_lock_irq(&rq->lock); |
7704 | update_rq_clock(rq); | 7733 | update_rq_clock(rq); |
7705 | deactivate_task(rq, rq->idle, 0); | 7734 | deactivate_task(rq, rq->idle, 0); |
7706 | rq->idle->static_prio = MAX_PRIO; | ||
7707 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); | 7735 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); |
7708 | rq->idle->sched_class = &idle_sched_class; | 7736 | rq->idle->sched_class = &idle_sched_class; |
7709 | migrate_dead_tasks(cpu); | 7737 | migrate_dead_tasks(cpu); |
7710 | spin_unlock_irq(&rq->lock); | 7738 | raw_spin_unlock_irq(&rq->lock); |
7711 | cpuset_unlock(); | 7739 | cpuset_unlock(); |
7712 | migrate_nr_uninterruptible(rq); | 7740 | migrate_nr_uninterruptible(rq); |
7713 | BUG_ON(rq->nr_running != 0); | 7741 | BUG_ON(rq->nr_running != 0); |
@@ -7717,30 +7745,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7717 | * they didn't take sched_hotcpu_mutex. Just wake up | 7745 | * they didn't take sched_hotcpu_mutex. Just wake up |
7718 | * the requestors. | 7746 | * the requestors. |
7719 | */ | 7747 | */ |
7720 | spin_lock_irq(&rq->lock); | 7748 | raw_spin_lock_irq(&rq->lock); |
7721 | while (!list_empty(&rq->migration_queue)) { | 7749 | while (!list_empty(&rq->migration_queue)) { |
7722 | struct migration_req *req; | 7750 | struct migration_req *req; |
7723 | 7751 | ||
7724 | req = list_entry(rq->migration_queue.next, | 7752 | req = list_entry(rq->migration_queue.next, |
7725 | struct migration_req, list); | 7753 | struct migration_req, list); |
7726 | list_del_init(&req->list); | 7754 | list_del_init(&req->list); |
7727 | spin_unlock_irq(&rq->lock); | 7755 | raw_spin_unlock_irq(&rq->lock); |
7728 | complete(&req->done); | 7756 | complete(&req->done); |
7729 | spin_lock_irq(&rq->lock); | 7757 | raw_spin_lock_irq(&rq->lock); |
7730 | } | 7758 | } |
7731 | spin_unlock_irq(&rq->lock); | 7759 | raw_spin_unlock_irq(&rq->lock); |
7732 | break; | 7760 | break; |
7733 | 7761 | ||
7734 | case CPU_DYING: | 7762 | case CPU_DYING: |
7735 | case CPU_DYING_FROZEN: | 7763 | case CPU_DYING_FROZEN: |
7736 | /* Update our root-domain */ | 7764 | /* Update our root-domain */ |
7737 | rq = cpu_rq(cpu); | 7765 | rq = cpu_rq(cpu); |
7738 | spin_lock_irqsave(&rq->lock, flags); | 7766 | raw_spin_lock_irqsave(&rq->lock, flags); |
7739 | if (rq->rd) { | 7767 | if (rq->rd) { |
7740 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | 7768 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
7741 | set_rq_offline(rq); | 7769 | set_rq_offline(rq); |
7742 | } | 7770 | } |
7743 | spin_unlock_irqrestore(&rq->lock, flags); | 7771 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7744 | break; | 7772 | break; |
7745 | #endif | 7773 | #endif |
7746 | } | 7774 | } |
@@ -7970,7 +7998,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
7970 | struct root_domain *old_rd = NULL; | 7998 | struct root_domain *old_rd = NULL; |
7971 | unsigned long flags; | 7999 | unsigned long flags; |
7972 | 8000 | ||
7973 | spin_lock_irqsave(&rq->lock, flags); | 8001 | raw_spin_lock_irqsave(&rq->lock, flags); |
7974 | 8002 | ||
7975 | if (rq->rd) { | 8003 | if (rq->rd) { |
7976 | old_rd = rq->rd; | 8004 | old_rd = rq->rd; |
@@ -7996,7 +8024,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
7996 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) | 8024 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) |
7997 | set_rq_online(rq); | 8025 | set_rq_online(rq); |
7998 | 8026 | ||
7999 | spin_unlock_irqrestore(&rq->lock, flags); | 8027 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
8000 | 8028 | ||
8001 | if (old_rd) | 8029 | if (old_rd) |
8002 | free_rootdomain(old_rd); | 8030 | free_rootdomain(old_rd); |
@@ -8282,14 +8310,14 @@ enum s_alloc { | |||
8282 | */ | 8310 | */ |
8283 | #ifdef CONFIG_SCHED_SMT | 8311 | #ifdef CONFIG_SCHED_SMT |
8284 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); | 8312 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
8285 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); | 8313 | static DEFINE_PER_CPU(struct static_sched_group, sched_groups); |
8286 | 8314 | ||
8287 | static int | 8315 | static int |
8288 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, | 8316 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
8289 | struct sched_group **sg, struct cpumask *unused) | 8317 | struct sched_group **sg, struct cpumask *unused) |
8290 | { | 8318 | { |
8291 | if (sg) | 8319 | if (sg) |
8292 | *sg = &per_cpu(sched_group_cpus, cpu).sg; | 8320 | *sg = &per_cpu(sched_groups, cpu).sg; |
8293 | return cpu; | 8321 | return cpu; |
8294 | } | 8322 | } |
8295 | #endif /* CONFIG_SCHED_SMT */ | 8323 | #endif /* CONFIG_SCHED_SMT */ |
@@ -9099,7 +9127,7 @@ match1: | |||
9099 | if (doms_new == NULL) { | 9127 | if (doms_new == NULL) { |
9100 | ndoms_cur = 0; | 9128 | ndoms_cur = 0; |
9101 | doms_new = &fallback_doms; | 9129 | doms_new = &fallback_doms; |
9102 | cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map); | 9130 | cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); |
9103 | WARN_ON_ONCE(dattr_new); | 9131 | WARN_ON_ONCE(dattr_new); |
9104 | } | 9132 | } |
9105 | 9133 | ||
@@ -9230,8 +9258,10 @@ static int update_sched_domains(struct notifier_block *nfb, | |||
9230 | switch (action) { | 9258 | switch (action) { |
9231 | case CPU_ONLINE: | 9259 | case CPU_ONLINE: |
9232 | case CPU_ONLINE_FROZEN: | 9260 | case CPU_ONLINE_FROZEN: |
9233 | case CPU_DEAD: | 9261 | case CPU_DOWN_PREPARE: |
9234 | case CPU_DEAD_FROZEN: | 9262 | case CPU_DOWN_PREPARE_FROZEN: |
9263 | case CPU_DOWN_FAILED: | ||
9264 | case CPU_DOWN_FAILED_FROZEN: | ||
9235 | partition_sched_domains(1, NULL, NULL); | 9265 | partition_sched_domains(1, NULL, NULL); |
9236 | return NOTIFY_OK; | 9266 | return NOTIFY_OK; |
9237 | 9267 | ||
@@ -9278,7 +9308,7 @@ void __init sched_init_smp(void) | |||
9278 | #endif | 9308 | #endif |
9279 | get_online_cpus(); | 9309 | get_online_cpus(); |
9280 | mutex_lock(&sched_domains_mutex); | 9310 | mutex_lock(&sched_domains_mutex); |
9281 | arch_init_sched_domains(cpu_online_mask); | 9311 | arch_init_sched_domains(cpu_active_mask); |
9282 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); | 9312 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
9283 | if (cpumask_empty(non_isolated_cpus)) | 9313 | if (cpumask_empty(non_isolated_cpus)) |
9284 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); | 9314 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
@@ -9351,13 +9381,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) | |||
9351 | #ifdef CONFIG_SMP | 9381 | #ifdef CONFIG_SMP |
9352 | rt_rq->rt_nr_migratory = 0; | 9382 | rt_rq->rt_nr_migratory = 0; |
9353 | rt_rq->overloaded = 0; | 9383 | rt_rq->overloaded = 0; |
9354 | plist_head_init(&rt_rq->pushable_tasks, &rq->lock); | 9384 | plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock); |
9355 | #endif | 9385 | #endif |
9356 | 9386 | ||
9357 | rt_rq->rt_time = 0; | 9387 | rt_rq->rt_time = 0; |
9358 | rt_rq->rt_throttled = 0; | 9388 | rt_rq->rt_throttled = 0; |
9359 | rt_rq->rt_runtime = 0; | 9389 | rt_rq->rt_runtime = 0; |
9360 | spin_lock_init(&rt_rq->rt_runtime_lock); | 9390 | raw_spin_lock_init(&rt_rq->rt_runtime_lock); |
9361 | 9391 | ||
9362 | #ifdef CONFIG_RT_GROUP_SCHED | 9392 | #ifdef CONFIG_RT_GROUP_SCHED |
9363 | rt_rq->rt_nr_boosted = 0; | 9393 | rt_rq->rt_nr_boosted = 0; |
@@ -9517,7 +9547,7 @@ void __init sched_init(void) | |||
9517 | struct rq *rq; | 9547 | struct rq *rq; |
9518 | 9548 | ||
9519 | rq = cpu_rq(i); | 9549 | rq = cpu_rq(i); |
9520 | spin_lock_init(&rq->lock); | 9550 | raw_spin_lock_init(&rq->lock); |
9521 | rq->nr_running = 0; | 9551 | rq->nr_running = 0; |
9522 | rq->calc_load_active = 0; | 9552 | rq->calc_load_active = 0; |
9523 | rq->calc_load_update = jiffies + LOAD_FREQ; | 9553 | rq->calc_load_update = jiffies + LOAD_FREQ; |
@@ -9577,7 +9607,7 @@ void __init sched_init(void) | |||
9577 | #elif defined CONFIG_USER_SCHED | 9607 | #elif defined CONFIG_USER_SCHED |
9578 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); | 9608 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); |
9579 | init_tg_rt_entry(&init_task_group, | 9609 | init_tg_rt_entry(&init_task_group, |
9580 | &per_cpu(init_rt_rq, i), | 9610 | &per_cpu(init_rt_rq_var, i), |
9581 | &per_cpu(init_sched_rt_entity, i), i, 1, | 9611 | &per_cpu(init_sched_rt_entity, i), i, 1, |
9582 | root_task_group.rt_se[i]); | 9612 | root_task_group.rt_se[i]); |
9583 | #endif | 9613 | #endif |
@@ -9615,7 +9645,7 @@ void __init sched_init(void) | |||
9615 | #endif | 9645 | #endif |
9616 | 9646 | ||
9617 | #ifdef CONFIG_RT_MUTEXES | 9647 | #ifdef CONFIG_RT_MUTEXES |
9618 | plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); | 9648 | plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); |
9619 | #endif | 9649 | #endif |
9620 | 9650 | ||
9621 | /* | 9651 | /* |
@@ -9659,7 +9689,7 @@ void __init sched_init(void) | |||
9659 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP | 9689 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP |
9660 | static inline int preempt_count_equals(int preempt_offset) | 9690 | static inline int preempt_count_equals(int preempt_offset) |
9661 | { | 9691 | { |
9662 | int nested = preempt_count() & ~PREEMPT_ACTIVE; | 9692 | int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); |
9663 | 9693 | ||
9664 | return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); | 9694 | return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); |
9665 | } | 9695 | } |
@@ -9740,13 +9770,13 @@ void normalize_rt_tasks(void) | |||
9740 | continue; | 9770 | continue; |
9741 | } | 9771 | } |
9742 | 9772 | ||
9743 | spin_lock(&p->pi_lock); | 9773 | raw_spin_lock(&p->pi_lock); |
9744 | rq = __task_rq_lock(p); | 9774 | rq = __task_rq_lock(p); |
9745 | 9775 | ||
9746 | normalize_task(rq, p); | 9776 | normalize_task(rq, p); |
9747 | 9777 | ||
9748 | __task_rq_unlock(rq); | 9778 | __task_rq_unlock(rq); |
9749 | spin_unlock(&p->pi_lock); | 9779 | raw_spin_unlock(&p->pi_lock); |
9750 | } while_each_thread(g, p); | 9780 | } while_each_thread(g, p); |
9751 | 9781 | ||
9752 | read_unlock_irqrestore(&tasklist_lock, flags); | 9782 | read_unlock_irqrestore(&tasklist_lock, flags); |
@@ -9842,13 +9872,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
9842 | se = kzalloc_node(sizeof(struct sched_entity), | 9872 | se = kzalloc_node(sizeof(struct sched_entity), |
9843 | GFP_KERNEL, cpu_to_node(i)); | 9873 | GFP_KERNEL, cpu_to_node(i)); |
9844 | if (!se) | 9874 | if (!se) |
9845 | goto err; | 9875 | goto err_free_rq; |
9846 | 9876 | ||
9847 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); | 9877 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); |
9848 | } | 9878 | } |
9849 | 9879 | ||
9850 | return 1; | 9880 | return 1; |
9851 | 9881 | ||
9882 | err_free_rq: | ||
9883 | kfree(cfs_rq); | ||
9852 | err: | 9884 | err: |
9853 | return 0; | 9885 | return 0; |
9854 | } | 9886 | } |
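Note: the err_free_rq hunks above and below fix a leak in the group-scheduling allocators: when the per-cpu se/rt_se allocation fails, the cfs_rq/rt_rq allocated just before it is now freed instead of being dropped. The unwind pattern in isolation, with placeholder names and sizes rather than the scheduler's own:

	#include <linux/slab.h>

	/* Placeholder example of the goto-unwind pattern the fix adopts:
	 * each later failure jumps to a label that releases everything
	 * allocated before it.  Returns 1 on success, 0 on failure, like
	 * the allocators in the hunks. */
	static int alloc_pair(void **first, void **second)
	{
		*first = kzalloc(64, GFP_KERNEL);
		if (!*first)
			goto err;

		*second = kzalloc(64, GFP_KERNEL);
		if (!*second)
			goto err_free_first;	/* don't leak *first */

		return 1;

	err_free_first:
		kfree(*first);
	err:
		return 0;
	}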
@@ -9930,13 +9962,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
9930 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), | 9962 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
9931 | GFP_KERNEL, cpu_to_node(i)); | 9963 | GFP_KERNEL, cpu_to_node(i)); |
9932 | if (!rt_se) | 9964 | if (!rt_se) |
9933 | goto err; | 9965 | goto err_free_rq; |
9934 | 9966 | ||
9935 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); | 9967 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); |
9936 | } | 9968 | } |
9937 | 9969 | ||
9938 | return 1; | 9970 | return 1; |
9939 | 9971 | ||
9972 | err_free_rq: | ||
9973 | kfree(rt_rq); | ||
9940 | err: | 9974 | err: |
9941 | return 0; | 9975 | return 0; |
9942 | } | 9976 | } |
@@ -10070,7 +10104,7 @@ void sched_move_task(struct task_struct *tsk) | |||
10070 | 10104 | ||
10071 | #ifdef CONFIG_FAIR_GROUP_SCHED | 10105 | #ifdef CONFIG_FAIR_GROUP_SCHED |
10072 | if (tsk->sched_class->moved_group) | 10106 | if (tsk->sched_class->moved_group) |
10073 | tsk->sched_class->moved_group(tsk); | 10107 | tsk->sched_class->moved_group(tsk, on_rq); |
10074 | #endif | 10108 | #endif |
10075 | 10109 | ||
10076 | if (unlikely(running)) | 10110 | if (unlikely(running)) |
@@ -10105,9 +10139,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares) | |||
10105 | struct rq *rq = cfs_rq->rq; | 10139 | struct rq *rq = cfs_rq->rq; |
10106 | unsigned long flags; | 10140 | unsigned long flags; |
10107 | 10141 | ||
10108 | spin_lock_irqsave(&rq->lock, flags); | 10142 | raw_spin_lock_irqsave(&rq->lock, flags); |
10109 | __set_se_shares(se, shares); | 10143 | __set_se_shares(se, shares); |
10110 | spin_unlock_irqrestore(&rq->lock, flags); | 10144 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
10111 | } | 10145 | } |
10112 | 10146 | ||
10113 | static DEFINE_MUTEX(shares_mutex); | 10147 | static DEFINE_MUTEX(shares_mutex); |
@@ -10292,18 +10326,18 @@ static int tg_set_bandwidth(struct task_group *tg, | |||
10292 | if (err) | 10326 | if (err) |
10293 | goto unlock; | 10327 | goto unlock; |
10294 | 10328 | ||
10295 | spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); | 10329 | raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
10296 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); | 10330 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
10297 | tg->rt_bandwidth.rt_runtime = rt_runtime; | 10331 | tg->rt_bandwidth.rt_runtime = rt_runtime; |
10298 | 10332 | ||
10299 | for_each_possible_cpu(i) { | 10333 | for_each_possible_cpu(i) { |
10300 | struct rt_rq *rt_rq = tg->rt_rq[i]; | 10334 | struct rt_rq *rt_rq = tg->rt_rq[i]; |
10301 | 10335 | ||
10302 | spin_lock(&rt_rq->rt_runtime_lock); | 10336 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
10303 | rt_rq->rt_runtime = rt_runtime; | 10337 | rt_rq->rt_runtime = rt_runtime; |
10304 | spin_unlock(&rt_rq->rt_runtime_lock); | 10338 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
10305 | } | 10339 | } |
10306 | spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); | 10340 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
10307 | unlock: | 10341 | unlock: |
10308 | read_unlock(&tasklist_lock); | 10342 | read_unlock(&tasklist_lock); |
10309 | mutex_unlock(&rt_constraints_mutex); | 10343 | mutex_unlock(&rt_constraints_mutex); |
@@ -10408,15 +10442,15 @@ static int sched_rt_global_constraints(void) | |||
10408 | if (sysctl_sched_rt_runtime == 0) | 10442 | if (sysctl_sched_rt_runtime == 0) |
10409 | return -EBUSY; | 10443 | return -EBUSY; |
10410 | 10444 | ||
10411 | spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); | 10445 | raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
10412 | for_each_possible_cpu(i) { | 10446 | for_each_possible_cpu(i) { |
10413 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; | 10447 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
10414 | 10448 | ||
10415 | spin_lock(&rt_rq->rt_runtime_lock); | 10449 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
10416 | rt_rq->rt_runtime = global_rt_runtime(); | 10450 | rt_rq->rt_runtime = global_rt_runtime(); |
10417 | spin_unlock(&rt_rq->rt_runtime_lock); | 10451 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
10418 | } | 10452 | } |
10419 | spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); | 10453 | raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); |
10420 | 10454 | ||
10421 | return 0; | 10455 | return 0; |
10422 | } | 10456 | } |
@@ -10707,9 +10741,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | |||
10707 | /* | 10741 | /* |
10708 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. | 10742 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. |
10709 | */ | 10743 | */ |
10710 | spin_lock_irq(&cpu_rq(cpu)->lock); | 10744 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
10711 | data = *cpuusage; | 10745 | data = *cpuusage; |
10712 | spin_unlock_irq(&cpu_rq(cpu)->lock); | 10746 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
10713 | #else | 10747 | #else |
10714 | data = *cpuusage; | 10748 | data = *cpuusage; |
10715 | #endif | 10749 | #endif |
@@ -10725,9 +10759,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) | |||
10725 | /* | 10759 | /* |
10726 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. | 10760 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. |
10727 | */ | 10761 | */ |
10728 | spin_lock_irq(&cpu_rq(cpu)->lock); | 10762 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
10729 | *cpuusage = val; | 10763 | *cpuusage = val; |
10730 | spin_unlock_irq(&cpu_rq(cpu)->lock); | 10764 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
10731 | #else | 10765 | #else |
10732 | *cpuusage = val; | 10766 | *cpuusage = val; |
10733 | #endif | 10767 | #endif |
@@ -10961,9 +10995,9 @@ void synchronize_sched_expedited(void) | |||
10961 | init_completion(&req->done); | 10995 | init_completion(&req->done); |
10962 | req->task = NULL; | 10996 | req->task = NULL; |
10963 | req->dest_cpu = RCU_MIGRATION_NEED_QS; | 10997 | req->dest_cpu = RCU_MIGRATION_NEED_QS; |
10964 | spin_lock_irqsave(&rq->lock, flags); | 10998 | raw_spin_lock_irqsave(&rq->lock, flags); |
10965 | list_add(&req->list, &rq->migration_queue); | 10999 | list_add(&req->list, &rq->migration_queue); |
10966 | spin_unlock_irqrestore(&rq->lock, flags); | 11000 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
10967 | wake_up_process(rq->migration_thread); | 11001 | wake_up_process(rq->migration_thread); |
10968 | } | 11002 | } |
10969 | for_each_online_cpu(cpu) { | 11003 | for_each_online_cpu(cpu) { |
@@ -10971,11 +11005,11 @@ void synchronize_sched_expedited(void) | |||
10971 | req = &per_cpu(rcu_migration_req, cpu); | 11005 | req = &per_cpu(rcu_migration_req, cpu); |
10972 | rq = cpu_rq(cpu); | 11006 | rq = cpu_rq(cpu); |
10973 | wait_for_completion(&req->done); | 11007 | wait_for_completion(&req->done); |
10974 | spin_lock_irqsave(&rq->lock, flags); | 11008 | raw_spin_lock_irqsave(&rq->lock, flags); |
10975 | if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) | 11009 | if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) |
10976 | need_full_sync = 1; | 11010 | need_full_sync = 1; |
10977 | req->dest_cpu = RCU_MIGRATION_IDLE; | 11011 | req->dest_cpu = RCU_MIGRATION_IDLE; |
10978 | spin_unlock_irqrestore(&rq->lock, flags); | 11012 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
10979 | } | 11013 | } |
10980 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; | 11014 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; |
10981 | synchronize_sched_expedited_count++; | 11015 | synchronize_sched_expedited_count++; |