Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  1000
1 files changed, 573 insertions, 427 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 854ab418fd42..c535cc4f6428 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
 
 struct rt_bandwidth {
 	/* nests inside the rq lock: */
-	spinlock_t rt_runtime_lock;
+	raw_spinlock_t rt_runtime_lock;
 	ktime_t rt_period;
 	u64 rt_runtime;
 	struct hrtimer rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 	rt_b->rt_period = ns_to_ktime(period);
 	rt_b->rt_runtime = runtime;
 
-	spin_lock_init(&rt_b->rt_runtime_lock);
+	raw_spin_lock_init(&rt_b->rt_runtime_lock);
 
 	hrtimer_init(&rt_b->rt_period_timer,
 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	if (hrtimer_active(&rt_b->rt_period_timer))
 		return;
 
-	spin_lock(&rt_b->rt_runtime_lock);
+	raw_spin_lock(&rt_b->rt_runtime_lock);
 	for (;;) {
 		unsigned long delta;
 		ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
 				HRTIMER_MODE_ABS_PINNED, 0);
 	}
-	spin_unlock(&rt_b->rt_runtime_lock);
+	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -469,7 +470,7 @@ struct rt_rq {
 	u64 rt_time;
 	u64 rt_runtime;
 	/* Nests inside the rq lock: */
-	spinlock_t rt_runtime_lock;
+	raw_spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
 	unsigned long rt_nr_boosted;
@@ -524,7 +525,7 @@ static struct root_domain def_root_domain;
  */
 struct rq {
 	/* runqueue lock: */
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
@@ -534,14 +535,12 @@ struct rq {
 #define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ
-	unsigned long last_tick_seen;
 	unsigned char in_nohz_recently;
 #endif
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
-	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -590,6 +589,8 @@ struct rq {
 
 	u64 rt_avg;
 	u64 age_stamp;
+	u64 idle_stamp;
+	u64 avg_idle;
 #endif
 
 	/* calc_load related fields */
@@ -676,6 +677,7 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
@@ -683,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
  */
 int runqueue_is_locked(int cpu)
 {
-	return spin_is_locked(&cpu_rq(cpu)->lock);
+	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
@@ -770,7 +772,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	if (!sched_feat_names[i])
 		return -EINVAL;
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	return cnt;
 }
@@ -812,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  * default: 0.25ms
  */
 unsigned int sysctl_sched_shares_ratelimit = 250000;
+unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * Inject some fuzzyness into changing the per-cpu group shares
@@ -890,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 
-	spin_unlock_irq(&rq->lock);
+	raw_spin_unlock_irq(&rq->lock);
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -914,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	next->oncpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	spin_unlock_irq(&rq->lock);
+	raw_spin_unlock_irq(&rq->lock);
 #else
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 #endif
 }
 
@@ -946,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 {
 	for (;;) {
 		struct rq *rq = task_rq(p);
-		spin_lock(&rq->lock);
+		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p)))
 			return rq;
-		spin_unlock(&rq->lock);
+		raw_spin_unlock(&rq->lock);
 	}
 }
 
@@ -966,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	for (;;) {
 		local_irq_save(*flags);
 		rq = task_rq(p);
-		spin_lock(&rq->lock);
+		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p)))
 			return rq;
-		spin_unlock_irqrestore(&rq->lock, *flags);
+		raw_spin_unlock_irqrestore(&rq->lock, *flags);
 	}
 }
 
@@ -978,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
 	struct rq *rq = task_rq(p);
 
 	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-	spin_unlock_wait(&rq->lock);
+	raw_spin_unlock_wait(&rq->lock);
 }
 
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 }
 
 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
 	__releases(rq->lock)
 {
-	spin_unlock_irqrestore(&rq->lock, *flags);
+	raw_spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
@@ -1003,7 +1006,7 @@ static struct rq *this_rq_lock(void)
 
 	local_irq_disable();
 	rq = this_rq();
-	spin_lock(&rq->lock);
+	raw_spin_lock(&rq->lock);
 
 	return rq;
 }
@@ -1050,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
-	spin_lock(&rq->lock);
+	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 
 	return HRTIMER_NORESTART;
 }
@@ -1066,10 +1069,10 @@ static void __hrtick_start(void *arg)
 {
 	struct rq *rq = arg;
 
-	spin_lock(&rq->lock);
+	raw_spin_lock(&rq->lock);
 	hrtimer_restart(&rq->hrtick_timer);
 	rq->hrtick_csd_pending = 0;
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -1176,7 +1179,7 @@ static void resched_task(struct task_struct *p)
 {
 	int cpu;
 
-	assert_spin_locked(&task_rq(p)->lock);
+	assert_raw_spin_locked(&task_rq(p)->lock);
 
 	if (test_tsk_need_resched(p))
 		return;
@@ -1198,10 +1201,10 @@ static void resched_cpu(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&rq->lock, flags))
+	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
 		return;
 	resched_task(cpu_curr(cpu));
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -1270,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 #else /* !CONFIG_SMP */
 static void resched_task(struct task_struct *p)
 {
-	assert_spin_locked(&task_rq(p)->lock);
+	assert_raw_spin_locked(&task_rq(p)->lock);
 	set_tsk_need_resched(p);
 }
 
@@ -1563,11 +1566,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1577,12 +1576,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
 				    unsigned long sd_shares,
 				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
 {
 	unsigned long shares, rq_weight;
 	int boost = 0;
 
-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
@@ -1601,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 		struct rq *rq = cpu_rq(cpu);
 		unsigned long flags;
 
-		spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock_irqsave(&rq->lock, flags);
 		tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
 		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
 		__set_se_shares(tg->se[cpu], shares);
-		spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 }
 
@@ -1616,8 +1615,8 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
+	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
 	int i;
@@ -1626,12 +1625,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		return 0;
 
 	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
 	for_each_cpu(i, sched_domain_span(sd)) {
 		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;
 
+		rq_weight += weight;
 		/*
 		 * If there are currently no tasks on the cpu pretend there
 		 * is one of average load so that when a new task gets to
@@ -1640,10 +1640,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		if (!weight)
 			weight = NICE_0_LOAD;
 
-		rq_weight += weight;
+		sum_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
+	if (!rq_weight)
+		rq_weight = sum_weight;
+
 	if ((!shares && rq_weight) || shares > tg->shares)
 		shares = tg->shares;
 
@@ -1651,7 +1654,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
 	local_irq_restore(flags);
 
@@ -1703,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 	if (root_task_group_empty())
 		return;
 
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 	update_shares(sd);
-	spin_lock(&rq->lock);
+	raw_spin_lock(&rq->lock);
 }
 
 static void update_h_load(long cpu)
@@ -1745,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
-	spin_unlock(&this_rq->lock);
+	raw_spin_unlock(&this_rq->lock);
 	double_rq_lock(this_rq, busiest);
 
 	return 1;
@@ -1766,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
 	int ret = 0;
 
-	if (unlikely(!spin_trylock(&busiest->lock))) {
+	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
 		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_unlock(&this_rq->lock);
+			raw_spin_lock(&busiest->lock);
+			raw_spin_lock_nested(&this_rq->lock,
+					      SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock_nested(&busiest->lock,
+					      SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
@@ -1787,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
 	if (unlikely(!irqs_disabled())) {
 		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
+		raw_spin_unlock(&this_rq->lock);
 		BUG_ON(1);
 	}
 
@@ -1797,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {
-	spin_unlock(&busiest->lock);
+	raw_spin_unlock(&busiest->lock);
 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 #endif
@@ -1812,6 +1817,22 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 #endif
 
 static void calc_load_account_active(struct rq *this_rq);
+static void update_sysctl(void);
+static int get_update_sysctl_factor(void);
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfuly executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
+	task_thread_info(p)->cpu = cpu;
+#endif
+}
 
 #include "sched_stats.h"
 #include "sched_idletask.c"
@@ -1969,20 +1990,6 @@ inline int task_curr(const struct task_struct *p)
 	return cpu_curr(task_cpu(p)) == p;
 }
 
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfuly executed on another CPU. We must ensure that updates of
-	 * per-task data have been completed by this moment.
-	 */
-	smp_wmb();
-	task_thread_info(p)->cpu = cpu;
-#endif
-}
-
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
 				       int oldprio, int running)
@@ -2004,17 +2011,17 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 {
 	s64 delta;
 
+	if (p->sched_class != &fair_sched_class)
+		return 0;
+
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) &&
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 			(&p->se == cfs_rq_of(&p->se)->next ||
 			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
-	if (p->sched_class != &fair_sched_class)
-		return 0;
-
 	if (sysctl_sched_migration_cost == -1)
 		return 1;
 	if (sysctl_sched_migration_cost == 0)
@@ -2025,39 +2032,23 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	return delta < (s64)sysctl_sched_migration_cost;
 }
 
-
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
-	int old_cpu = task_cpu(p);
-	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
-	struct cfs_rq *old_cfsrq = task_cfs_rq(p),
-		      *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
-	u64 clock_offset;
-
-	clock_offset = old_rq->clock - new_rq->clock;
+#ifdef CONFIG_SCHED_DEBUG
+	/*
+	 * We should never call set_task_cpu() on a blocked task,
+	 * ttwu() will sort out the placement.
+	 */
+	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
+			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+#endif
 
 	trace_sched_migrate_task(p, new_cpu);
 
-#ifdef CONFIG_SCHEDSTATS
-	if (p->se.wait_start)
-		p->se.wait_start -= clock_offset;
-	if (p->se.sleep_start)
-		p->se.sleep_start -= clock_offset;
-	if (p->se.block_start)
-		p->se.block_start -= clock_offset;
-#endif
-	if (old_cpu != new_cpu) {
+	if (task_cpu(p) != new_cpu) {
 		p->se.nr_migrations++;
-		new_rq->nr_migrations_in++;
-#ifdef CONFIG_SCHEDSTATS
-		if (task_hot(p, old_rq->clock, NULL))
-			schedstat_inc(p, se.nr_forced2_migrations);
-#endif
-		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
-				     1, 1, NULL, 0);
+		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
 	}
-	p->se.vruntime -= old_cfsrq->min_vruntime -
-					 new_cfsrq->min_vruntime;
 
 	__set_task_cpu(p, new_cpu);
 }
@@ -2082,12 +2073,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 
 	/*
 	 * If the task is not on a runqueue (and not running), then
-	 * it is sufficient to simply update the task's cpu field.
+	 * the next wake-up will properly place the task.
 	 */
-	if (!p->se.on_rq && !task_running(rq, p)) {
-		set_task_cpu(p, dest_cpu);
+	if (!p->se.on_rq && !task_running(rq, p))
 		return 0;
-	}
 
 	init_completion(&req->done);
 	req->task = p;
@@ -2292,6 +2281,77 @@ void task_oncpu_function_call(struct task_struct *p,
 	preempt_enable();
 }
 
+#ifdef CONFIG_SMP
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+	int dest_cpu;
+	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+	/* Look for allowed, online CPU in same node. */
+	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+			return dest_cpu;
+
+	/* Any allowed, online CPU? */
+	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+	if (dest_cpu < nr_cpu_ids)
+		return dest_cpu;
+
+	/* No more Mr. Nice Guy. */
+	if (dest_cpu >= nr_cpu_ids) {
+		rcu_read_lock();
+		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+		rcu_read_unlock();
+		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (p->mm && printk_ratelimit()) {
+			printk(KERN_INFO "process %d (%s) no "
+			       "longer affine to cpu%d\n",
+			       task_pid_nr(p), p->comm, cpu);
+		}
+	}
+
+	return dest_cpu;
+}
+
+/*
+ * Called from:
+ *
+ *  - fork, @p is stable because it isn't on the tasklist yet
+ *
+ *  - exec, @p is unstable, retry loop
+ *
+ *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
+ *    we should be good.
+ */
+static inline
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+{
+	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+
+	/*
+	 * In order not to call set_task_cpu() on a blocking task we need
+	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
+	 * cpu.
+	 *
+	 * Since this is common to all placement strategies, this lives here.
+	 *
+	 * [ this allows ->select_task() to simply return task_cpu(p) and
+	 *   not worry about this generic constraint ]
+	 */
+	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+		     !cpu_online(cpu)))
+		cpu = select_fallback_rq(task_cpu(p), p);
+
+	return cpu;
+}
+#endif
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2311,7 +2371,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
-	struct rq *rq;
+	struct rq *rq, *orig_rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
 		wake_flags &= ~WF_SYNC;
@@ -2319,7 +2379,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = task_rq_lock(p, &flags);
+	rq = orig_rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
@@ -2343,13 +2403,19 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 	p->state = TASK_WAKING;
-	task_rq_unlock(rq, &flags);
 
-	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+	if (p->sched_class->task_waking)
+		p->sched_class->task_waking(rq, p);
+
+	__task_rq_unlock(rq);
+
+	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
 	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
 
-	rq = task_rq_lock(p, &flags);
+	rq = __task_rq_lock(p);
+	update_rq_clock(rq);
+
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
@@ -2404,8 +2470,19 @@ out_running:
 
 	p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
-	if (p->sched_class->task_wake_up)
-		p->sched_class->task_wake_up(rq, p);
+	if (p->sched_class->task_woken)
+		p->sched_class->task_woken(rq, p);
+
+	if (unlikely(rq->idle_stamp)) {
+		u64 delta = rq->clock - rq->idle_stamp;
+		u64 max = 2*sysctl_sched_migration_cost;
+
+		if (delta > max)
+			rq->avg_idle = max;
+		else
+			update_avg(&rq->avg_idle, delta);
+		rq->idle_stamp = 0;
+	}
 #endif
 out:
 	task_rq_unlock(rq, &flags);
@@ -2452,7 +2529,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_overlap = 0;
 	p->se.start_runtime = 0;
 	p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
-	p->se.avg_running = 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start = 0;
@@ -2474,7 +2550,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.nr_failed_migrations_running = 0;
 	p->se.nr_failed_migrations_hot = 0;
 	p->se.nr_forced_migrations = 0;
-	p->se.nr_forced2_migrations = 0;
 
 	p->se.nr_wakeups = 0;
 	p->se.nr_wakeups_sync = 0;
@@ -2495,14 +2570,6 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
-
-	/*
-	 * We mark the process as running here, but have not actually
-	 * inserted it onto the runqueue yet. This guarantees that
-	 * nobody will actually run it, and a signal or other external
-	 * event cannot wake it up and insert it on the runqueue either.
-	 */
-	p->state = TASK_RUNNING;
 }
 
 /*
@@ -2513,24 +2580,25 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	int cpu = get_cpu();
 
 	__sched_fork(p);
-
 	/*
-	 * Make sure we do not leak PI boosting priority to the child.
+	 * We mark the process as waking here. This guarantees that
+	 * nobody will actually run it, and a signal or other external
+	 * event cannot wake it up and insert it on the runqueue either.
 	 */
-	p->prio = current->normal_prio;
+	p->state = TASK_WAKING;
 
 	/*
 	 * Revert to default priority/policy on fork if requested.
 	 */
 	if (unlikely(p->sched_reset_on_fork)) {
-		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
 			p->policy = SCHED_NORMAL;
-
-		if (p->normal_prio < DEFAULT_PRIO)
-			p->prio = DEFAULT_PRIO;
+			p->normal_prio = p->static_prio;
+		}
 
 		if (PRIO_TO_NICE(p->static_prio) < 0) {
 			p->static_prio = NICE_TO_PRIO(0);
+			p->normal_prio = p->static_prio;
 			set_load_weight(p);
 		}
 
@@ -2541,11 +2609,19 @@ void sched_fork(struct task_struct *p, int clone_flags)
 		p->sched_reset_on_fork = 0;
 	}
 
+	/*
+	 * Make sure we do not leak PI boosting priority to the child.
+	 */
+	p->prio = current->normal_prio;
+
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
+	if (p->sched_class->task_fork)
+		p->sched_class->task_fork(p);
+
 #ifdef CONFIG_SMP
-	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
 	set_task_cpu(p, cpu);
 
@@ -2578,26 +2654,15 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	struct rq *rq;
 
 	rq = task_rq_lock(p, &flags);
-	BUG_ON(p->state != TASK_RUNNING);
+	BUG_ON(p->state != TASK_WAKING);
+	p->state = TASK_RUNNING;
 	update_rq_clock(rq);
-
-	p->prio = effective_prio(p);
-
-	if (!p->sched_class->task_new || !current->se.on_rq) {
-		activate_task(rq, p, 0);
-	} else {
-		/*
-		 * Let the scheduling class do new task startup
-		 * management (if any):
-		 */
-		p->sched_class->task_new(rq, p);
-		inc_nr_running(rq);
-	}
+	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
-	if (p->sched_class->task_wake_up)
-		p->sched_class->task_wake_up(rq, p);
+	if (p->sched_class->task_woken)
+		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, &flags);
 }
@@ -2749,10 +2814,10 @@ static inline void post_schedule(struct rq *rq)
 	if (rq->post_schedule) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock_irqsave(&rq->lock, flags);
 		if (rq->curr->sched_class->post_schedule)
 			rq->curr->sched_class->post_schedule(rq);
-		spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 		rq->post_schedule = 0;
 	}
@@ -2816,14 +2881,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 */
 	arch_start_context_switch(prev);
 
-	if (unlikely(!mm)) {
+	if (likely(!mm)) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm(oldmm, mm, next);
 
-	if (unlikely(!prev->mm)) {
+	if (likely(!prev->mm)) {
 		prev->active_mm = NULL;
 		rq->prev_mm = oldmm;
 	}
@@ -2986,15 +3051,6 @@ static void calc_load_account_active(struct rq *this_rq)
 }
 
 /*
- * Externally visible per-cpu scheduler statistics:
- * cpu_nr_migrations(cpu) - number of migrations into that cpu
- */
-u64 cpu_nr_migrations(int cpu)
-{
-	return cpu_rq(cpu)->nr_migrations_in;
-}
-
-/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
@@ -3043,15 +3099,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 {
 	BUG_ON(!irqs_disabled());
 	if (rq1 == rq2) {
-		spin_lock(&rq1->lock);
+		raw_spin_lock(&rq1->lock);
 		__acquire(rq2->lock); /* Fake it out ;) */
 	} else {
 		if (rq1 < rq2) {
-			spin_lock(&rq1->lock);
-			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock(&rq1->lock);
+			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
-			spin_lock(&rq2->lock);
-			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock(&rq2->lock);
+			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -3068,29 +3124,44 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
-	spin_unlock(&rq1->lock);
+	raw_spin_unlock(&rq1->lock);
 	if (rq1 != rq2)
-		spin_unlock(&rq2->lock);
+		raw_spin_unlock(&rq2->lock);
 	else
 		__release(rq2->lock);
 }
 
 /*
- * If dest_cpu is allowed for this process, migrate the task to it.
- * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
- * the cpu_allowed mask is restored.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
  */
-static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+void sched_exec(void)
 {
+	struct task_struct *p = current;
 	struct migration_req req;
+	int dest_cpu, this_cpu;
 	unsigned long flags;
 	struct rq *rq;
 
+again:
+	this_cpu = get_cpu();
+	dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
+	if (dest_cpu == this_cpu) {
+		put_cpu();
+		return;
+	}
+
 	rq = task_rq_lock(p, &flags);
+	put_cpu();
+
+	/*
+	 * select_task_rq() can race against ->cpus_allowed
+	 */
 	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
-	    || unlikely(!cpu_active(dest_cpu)))
-		goto out;
+	    || unlikely(!cpu_active(dest_cpu))) {
+		task_rq_unlock(rq, &flags);
+		goto again;
+	}
 
 	/* force the process onto the specified CPU */
 	if (migrate_task(p, dest_cpu, &req)) {
@@ -3105,24 +3176,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 
 		return;
 	}
-out:
 	task_rq_unlock(rq, &flags);
 }
 
 /*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
-	int new_cpu, this_cpu = get_cpu();
-	new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
-	put_cpu();
-	if (new_cpu != this_cpu)
-		sched_migrate_task(current, new_cpu);
-}
-
-/*
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
@@ -3132,10 +3189,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
-	/*
-	 * Note that idle threads have a prio of MAX_PRIO, for this test
-	 * to be always true for them.
-	 */
 	check_preempt_curr(this_rq, p, 0);
 }
 
@@ -3658,6 +3711,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
 * @idle: Idle status of this_cpu
@@ -4093,7 +4147,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long flags;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_setall(cpus);
+	cpumask_copy(cpus, cpu_active_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4166,14 +4220,15 @@ redo:
 
 	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-		spin_lock_irqsave(&busiest->lock, flags);
+		raw_spin_lock_irqsave(&busiest->lock, flags);
 
 		/* don't kick the migration_thread, if the curr
 		 * task on busiest cpu can't be moved to this_cpu
 		 */
 		if (!cpumask_test_cpu(this_cpu,
 				      &busiest->curr->cpus_allowed)) {
-			spin_unlock_irqrestore(&busiest->lock, flags);
+			raw_spin_unlock_irqrestore(&busiest->lock,
+						   flags);
 			all_pinned = 1;
 			goto out_one_pinned;
 		}
@@ -4183,7 +4238,7 @@ redo:
 			busiest->push_cpu = this_cpu;
 			active_balance = 1;
 		}
-		spin_unlock_irqrestore(&busiest->lock, flags);
+		raw_spin_unlock_irqrestore(&busiest->lock, flags);
 		if (active_balance)
 			wake_up_process(busiest->migration_thread);
 
@@ -4256,7 +4311,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	int all_pinned = 0;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_setall(cpus);
+	cpumask_copy(cpus, cpu_active_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4365,10 +4420,10 @@ redo:
 		/*
 		 * Should not call ttwu while holding a rq->lock
 		 */
-		spin_unlock(&this_rq->lock);
+		raw_spin_unlock(&this_rq->lock);
 		if (active_balance)
 			wake_up_process(busiest->migration_thread);
-		spin_lock(&this_rq->lock);
+		raw_spin_lock(&this_rq->lock);
 
 	} else
 		sd->nr_balance_failed = 0;
@@ -4396,6 +4451,11 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
 
+	this_rq->idle_stamp = this_rq->clock;
+
+	if (this_rq->avg_idle < sysctl_sched_migration_cost)
+		return;
+
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
 
@@ -4410,8 +4470,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task)
+		if (pulled_task) {
+			this_rq->idle_stamp = 0;
 			break;
+		}
 	}
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -4646,7 +4708,7 @@ int select_nohz_load_balancer(int stop_tick)
 		cpumask_set_cpu(cpu, nohz.cpu_mask);
 
 		/* time for ilb owner also to sleep */
-		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
+		if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
 				atomic_set(&nohz.load_balancer, -1);
 			return 0;
@@ -5013,8 +5075,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
 	p->gtime = cputime_add(p->gtime, cputime);
 
 	/* Add guest time to cpustat. */
-	cpustat->user = cputime64_add(cpustat->user, tmp);
-	cpustat->guest = cputime64_add(cpustat->guest, tmp);
+	if (TASK_NICE(p) > 0) {
+		cpustat->nice = cputime64_add(cpustat->nice, tmp);
+		cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+	} else {
+		cpustat->user = cputime64_add(cpustat->user, tmp);
+		cpustat->guest = cputime64_add(cpustat->guest, tmp);
+	}
 }
 
 /*
@@ -5129,60 +5196,86 @@ void account_idle_ticks(unsigned long ticks) | |||
5129 | * Use precise platform statistics if available: | 5196 | * Use precise platform statistics if available: |
5130 | */ | 5197 | */ |
5131 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 5198 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
5132 | cputime_t task_utime(struct task_struct *p) | 5199 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
5133 | { | 5200 | { |
5134 | return p->utime; | 5201 | *ut = p->utime; |
5202 | *st = p->stime; | ||
5135 | } | 5203 | } |
5136 | 5204 | ||
5137 | cputime_t task_stime(struct task_struct *p) | 5205 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
5138 | { | 5206 | { |
5139 | return p->stime; | 5207 | struct task_cputime cputime; |
5208 | |||
5209 | thread_group_cputime(p, &cputime); | ||
5210 | |||
5211 | *ut = cputime.utime; | ||
5212 | *st = cputime.stime; | ||
5140 | } | 5213 | } |
5141 | #else | 5214 | #else |
5142 | cputime_t task_utime(struct task_struct *p) | 5215 | |
5216 | #ifndef nsecs_to_cputime | ||
5217 | # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) | ||
5218 | #endif | ||
5219 | |||
5220 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
5143 | { | 5221 | { |
5144 | clock_t utime = cputime_to_clock_t(p->utime), | 5222 | cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); |
5145 | total = utime + cputime_to_clock_t(p->stime); | ||
5146 | u64 temp; | ||
5147 | 5223 | ||
5148 | /* | 5224 | /* |
5149 | * Use CFS's precise accounting: | 5225 | * Use CFS's precise accounting: |
5150 | */ | 5226 | */ |
5151 | temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime); | 5227 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); |
5152 | 5228 | ||
5153 | if (total) { | 5229 | if (total) { |
5154 | temp *= utime; | 5230 | u64 temp; |
5231 | |||
5232 | temp = (u64)(rtime * utime); | ||
5155 | do_div(temp, total); | 5233 | do_div(temp, total); |
5156 | } | 5234 | utime = (cputime_t)temp; |
5157 | utime = (clock_t)temp; | 5235 | } else |
5236 | utime = rtime; | ||
5237 | |||
5238 | /* | ||
5239 | * Compare with previous values, to keep monotonicity: | ||
5240 | */ | ||
5241 | p->prev_utime = max(p->prev_utime, utime); | ||
5242 | p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); | ||
5158 | 5243 | ||
5159 | p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime)); | 5244 | *ut = p->prev_utime; |
5160 | return p->prev_utime; | 5245 | *st = p->prev_stime; |
5161 | } | 5246 | } |
5162 | 5247 | ||
5163 | cputime_t task_stime(struct task_struct *p) | 5248 | /* |
5249 | * Must be called with siglock held. | ||
5250 | */ | ||
5251 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
5164 | { | 5252 | { |
5165 | clock_t stime; | 5253 | struct signal_struct *sig = p->signal; |
5254 | struct task_cputime cputime; | ||
5255 | cputime_t rtime, utime, total; | ||
5166 | 5256 | ||
5167 | /* | 5257 | thread_group_cputime(p, &cputime); |
5168 | * Use CFS's precise accounting. (we subtract utime from | ||
5169 | * the total, to make sure the total observed by userspace | ||
5170 | * grows monotonically - apps rely on that): | ||
5171 | */ | ||
5172 | stime = nsec_to_clock_t(p->se.sum_exec_runtime) - | ||
5173 | cputime_to_clock_t(task_utime(p)); | ||
5174 | 5258 | ||
5175 | if (stime >= 0) | 5259 | total = cputime_add(cputime.utime, cputime.stime); |
5176 | p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime)); | 5260 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); |
5177 | 5261 | ||
5178 | return p->prev_stime; | 5262 | if (total) { |
5179 | } | 5263 | u64 temp; |
5180 | #endif | ||
5181 | 5264 | ||
5182 | inline cputime_t task_gtime(struct task_struct *p) | 5265 | temp = (u64)(rtime * cputime.utime); |
5183 | { | 5266 | do_div(temp, total); |
5184 | return p->gtime; | 5267 | utime = (cputime_t)temp; |
5268 | } else | ||
5269 | utime = rtime; | ||
5270 | |||
5271 | sig->prev_utime = max(sig->prev_utime, utime); | ||
5272 | sig->prev_stime = max(sig->prev_stime, | ||
5273 | cputime_sub(rtime, sig->prev_utime)); | ||
5274 | |||
5275 | *ut = sig->prev_utime; | ||
5276 | *st = sig->prev_stime; | ||
5185 | } | 5277 | } |
5278 | #endif | ||
5186 | 5279 | ||
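The reworked task_times() above splits CFS's precise sum_exec_runtime into user and system shares in proportion to the tick-sampled utime/stime, then clamps against prev_utime/prev_stime so the values handed to user space never go backwards. A user-space sketch of that arithmetic (plain 64-bit integers stand in for cputime_t; only the logic is meant to match):

#include <stdio.h>
#include <stdint.h>

/* illustrative stand-in for the task's accounting fields */
struct acct {
	uint64_t utime, stime;           /* tick-sampled user/system time */
	uint64_t sum_exec_runtime;       /* precise CFS runtime, same unit here */
	uint64_t prev_utime, prev_stime; /* last values reported to user space */
};

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* split rtime proportionally and enforce monotonicity, as in task_times() */
static void task_times(struct acct *p, uint64_t *ut, uint64_t *st)
{
	uint64_t rtime = p->sum_exec_runtime;
	uint64_t total = p->utime + p->stime;
	uint64_t utime;

	if (total)
		utime = rtime * p->utime / total;	/* user share of precise runtime */
	else
		utime = rtime;				/* no samples yet: call it all user */

	p->prev_utime = max_u64(p->prev_utime, utime);
	p->prev_stime = max_u64(p->prev_stime, rtime - p->prev_utime);

	*ut = p->prev_utime;
	*st = p->prev_stime;
}

int main(void)
{
	struct acct p = { .utime = 30, .stime = 10, .sum_exec_runtime = 100 };
	uint64_t ut, st;

	task_times(&p, &ut, &st);	/* 75/25 split of the precise runtime */
	printf("ut=%llu st=%llu\n", (unsigned long long)ut, (unsigned long long)st);
	return 0;
}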
5187 | /* | 5280 | /* |
5188 | * This function gets called by the timer code, with HZ frequency. | 5281 | * This function gets called by the timer code, with HZ frequency. |
@@ -5199,11 +5292,11 @@ void scheduler_tick(void) | |||
5199 | 5292 | ||
5200 | sched_clock_tick(); | 5293 | sched_clock_tick(); |
5201 | 5294 | ||
5202 | spin_lock(&rq->lock); | 5295 | raw_spin_lock(&rq->lock); |
5203 | update_rq_clock(rq); | 5296 | update_rq_clock(rq); |
5204 | update_cpu_load(rq); | 5297 | update_cpu_load(rq); |
5205 | curr->sched_class->task_tick(rq, curr, 0); | 5298 | curr->sched_class->task_tick(rq, curr, 0); |
5206 | spin_unlock(&rq->lock); | 5299 | raw_spin_unlock(&rq->lock); |
5207 | 5300 | ||
5208 | perf_event_task_tick(curr, cpu); | 5301 | perf_event_task_tick(curr, cpu); |
5209 | 5302 | ||
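The spin_lock → raw_spin_lock conversions that run through the rest of this patch all follow one pattern: rq->lock and the rt_runtime locks become raw_spinlock_t, which stays a true busy-waiting lock even on configurations where the ordinary spinlock_t may be substituted by a sleeping lock. A hedged kernel-style sketch of the resulting idiom, assuming the raw_spinlock_t API introduced alongside this change:

#include <linux/spinlock.h>

/* a lock that must never sleep, e.g. one taken from the scheduler tick path */
static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned long example_counter;

static void example_tick(void)
{
	/* same calling convention as spin_lock()/spin_unlock(), raw_ prefixed */
	raw_spin_lock(&example_lock);
	example_counter++;
	raw_spin_unlock(&example_lock);
}

static void example_from_any_context(void)
{
	unsigned long flags;

	/* irqsave/irqrestore variants exist as well, mirroring spinlock_t */
	raw_spin_lock_irqsave(&example_lock, flags);
	example_counter++;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}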
@@ -5317,13 +5410,14 @@ static inline void schedule_debug(struct task_struct *prev) | |||
5317 | #endif | 5410 | #endif |
5318 | } | 5411 | } |
5319 | 5412 | ||
5320 | static void put_prev_task(struct rq *rq, struct task_struct *p) | 5413 | static void put_prev_task(struct rq *rq, struct task_struct *prev) |
5321 | { | 5414 | { |
5322 | u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime; | 5415 | if (prev->state == TASK_RUNNING) { |
5416 | u64 runtime = prev->se.sum_exec_runtime; | ||
5323 | 5417 | ||
5324 | update_avg(&p->se.avg_running, runtime); | 5418 | runtime -= prev->se.prev_sum_exec_runtime; |
5419 | runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); | ||
5325 | 5420 | ||
5326 | if (p->state == TASK_RUNNING) { | ||
5327 | /* | 5421 | /* |
5328 | * In order to avoid avg_overlap growing stale when we are | 5422 | * In order to avoid avg_overlap growing stale when we are |
5329 | * indeed overlapping and hence not getting put to sleep, grow | 5423 | * indeed overlapping and hence not getting put to sleep, grow |
@@ -5333,12 +5427,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p) | |||
5333 | * correlates to the amount of cache footprint a task can | 5427 | * correlates to the amount of cache footprint a task can |
5334 | * build up. | 5428 | * build up. |
5335 | */ | 5429 | */ |
5336 | runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); | 5430 | update_avg(&prev->se.avg_overlap, runtime); |
5337 | update_avg(&p->se.avg_overlap, runtime); | ||
5338 | } else { | ||
5339 | update_avg(&p->se.avg_running, 0); | ||
5340 | } | 5431 | } |
5341 | p->sched_class->put_prev_task(rq, p); | 5432 | prev->sched_class->put_prev_task(rq, prev); |
5342 | } | 5433 | } |
5343 | 5434 | ||
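For context, put_prev_task() now only updates avg_overlap, and only for a task that is still runnable; update_avg() in this kernel folds the sample into a running average with a 1/8 step (an assumption worth checking against the source), while the min_t() clamp keeps a single very long slice from inflating the average. A user-space sketch of that clamp-then-average combination:

#include <stdio.h>
#include <stdint.h>

/* stand-in for sysctl_sched_migration_cost, in arbitrary time units */
#define MIGRATION_COST 500000ULL

/* assumed 1/8-step exponentially weighted average, as in update_avg() */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)sample - (int64_t)*avg;

	*avg += diff >> 3;
}

int main(void)
{
	uint64_t avg_overlap = 0;
	uint64_t slices[] = { 100000, 250000, 5000000 };	/* last one gets clamped */

	for (int i = 0; i < 3; i++) {
		uint64_t runtime = slices[i];

		if (runtime > 2 * MIGRATION_COST)
			runtime = 2 * MIGRATION_COST;	/* the min_t(u64, ...) clamp above */
		update_avg(&avg_overlap, runtime);
		printf("slice=%llu avg_overlap=%llu\n",
		       (unsigned long long)slices[i],
		       (unsigned long long)avg_overlap);
	}
	return 0;
}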
5344 | /* | 5435 | /* |
@@ -5399,7 +5490,7 @@ need_resched_nonpreemptible: | |||
5399 | if (sched_feat(HRTICK)) | 5490 | if (sched_feat(HRTICK)) |
5400 | hrtick_clear(rq); | 5491 | hrtick_clear(rq); |
5401 | 5492 | ||
5402 | spin_lock_irq(&rq->lock); | 5493 | raw_spin_lock_irq(&rq->lock); |
5403 | update_rq_clock(rq); | 5494 | update_rq_clock(rq); |
5404 | clear_tsk_need_resched(prev); | 5495 | clear_tsk_need_resched(prev); |
5405 | 5496 | ||
@@ -5435,7 +5526,7 @@ need_resched_nonpreemptible: | |||
5435 | cpu = smp_processor_id(); | 5526 | cpu = smp_processor_id(); |
5436 | rq = cpu_rq(cpu); | 5527 | rq = cpu_rq(cpu); |
5437 | } else | 5528 | } else |
5438 | spin_unlock_irq(&rq->lock); | 5529 | raw_spin_unlock_irq(&rq->lock); |
5439 | 5530 | ||
5440 | post_schedule(rq); | 5531 | post_schedule(rq); |
5441 | 5532 | ||
@@ -5448,7 +5539,7 @@ need_resched_nonpreemptible: | |||
5448 | } | 5539 | } |
5449 | EXPORT_SYMBOL(schedule); | 5540 | EXPORT_SYMBOL(schedule); |
5450 | 5541 | ||
5451 | #ifdef CONFIG_SMP | 5542 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
5452 | /* | 5543 | /* |
5453 | * Look out! "owner" is an entirely speculative pointer | 5544 | * Look out! "owner" is an entirely speculative pointer |
5454 | * access and not reliable. | 5545 | * access and not reliable. |
@@ -5852,14 +5943,15 @@ EXPORT_SYMBOL(wait_for_completion_killable); | |||
5852 | */ | 5943 | */ |
5853 | bool try_wait_for_completion(struct completion *x) | 5944 | bool try_wait_for_completion(struct completion *x) |
5854 | { | 5945 | { |
5946 | unsigned long flags; | ||
5855 | int ret = 1; | 5947 | int ret = 1; |
5856 | 5948 | ||
5857 | spin_lock_irq(&x->wait.lock); | 5949 | spin_lock_irqsave(&x->wait.lock, flags); |
5858 | if (!x->done) | 5950 | if (!x->done) |
5859 | ret = 0; | 5951 | ret = 0; |
5860 | else | 5952 | else |
5861 | x->done--; | 5953 | x->done--; |
5862 | spin_unlock_irq(&x->wait.lock); | 5954 | spin_unlock_irqrestore(&x->wait.lock, flags); |
5863 | return ret; | 5955 | return ret; |
5864 | } | 5956 | } |
5865 | EXPORT_SYMBOL(try_wait_for_completion); | 5957 | EXPORT_SYMBOL(try_wait_for_completion); |
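Switching try_wait_for_completion() and completion_done() to the irqsave/irqrestore variants means the unlock no longer unconditionally re-enables interrupts, so both helpers become safe to call from contexts that already run with IRQs disabled. The general shape, as a hedged kernel-style sketch:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned int example_done;

/* safe whether the caller runs with interrupts enabled or disabled */
static bool example_try_consume(void)
{
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&example_lock, flags);	/* saves the current IRQ state */
	if (example_done) {
		example_done--;
		ret = true;
	}
	spin_unlock_irqrestore(&example_lock, flags);	/* restores it exactly */

	return ret;
}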
@@ -5874,12 +5966,13 @@ EXPORT_SYMBOL(try_wait_for_completion); | |||
5874 | */ | 5966 | */ |
5875 | bool completion_done(struct completion *x) | 5967 | bool completion_done(struct completion *x) |
5876 | { | 5968 | { |
5969 | unsigned long flags; | ||
5877 | int ret = 1; | 5970 | int ret = 1; |
5878 | 5971 | ||
5879 | spin_lock_irq(&x->wait.lock); | 5972 | spin_lock_irqsave(&x->wait.lock, flags); |
5880 | if (!x->done) | 5973 | if (!x->done) |
5881 | ret = 0; | 5974 | ret = 0; |
5882 | spin_unlock_irq(&x->wait.lock); | 5975 | spin_unlock_irqrestore(&x->wait.lock, flags); |
5883 | return ret; | 5976 | return ret; |
5884 | } | 5977 | } |
5885 | EXPORT_SYMBOL(completion_done); | 5978 | EXPORT_SYMBOL(completion_done); |
@@ -6142,22 +6235,14 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | |||
6142 | BUG_ON(p->se.on_rq); | 6235 | BUG_ON(p->se.on_rq); |
6143 | 6236 | ||
6144 | p->policy = policy; | 6237 | p->policy = policy; |
6145 | switch (p->policy) { | ||
6146 | case SCHED_NORMAL: | ||
6147 | case SCHED_BATCH: | ||
6148 | case SCHED_IDLE: | ||
6149 | p->sched_class = &fair_sched_class; | ||
6150 | break; | ||
6151 | case SCHED_FIFO: | ||
6152 | case SCHED_RR: | ||
6153 | p->sched_class = &rt_sched_class; | ||
6154 | break; | ||
6155 | } | ||
6156 | |||
6157 | p->rt_priority = prio; | 6238 | p->rt_priority = prio; |
6158 | p->normal_prio = normal_prio(p); | 6239 | p->normal_prio = normal_prio(p); |
6159 | /* we are holding p->pi_lock already */ | 6240 | /* we are holding p->pi_lock already */ |
6160 | p->prio = rt_mutex_getprio(p); | 6241 | p->prio = rt_mutex_getprio(p); |
6242 | if (rt_prio(p->prio)) | ||
6243 | p->sched_class = &rt_sched_class; | ||
6244 | else | ||
6245 | p->sched_class = &fair_sched_class; | ||
6161 | set_load_weight(p); | 6246 | set_load_weight(p); |
6162 | } | 6247 | } |
6163 | 6248 | ||
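Rather than switching on the requested policy, __setscheduler() now derives the class from the effective priority computed after PI boosting, so a boosted SCHED_NORMAL task correctly ends up in the RT class. A tiny user-space sketch of that decision (the threshold mirrors MAX_RT_PRIO; the class names are just labels):

#include <stdio.h>

#define MAX_RT_PRIO 100		/* priorities below this are real-time */

/* illustrative stand-in for picking &rt_sched_class vs &fair_sched_class */
static const char *pick_class(int effective_prio)
{
	/* mirrors: rt_prio(p->prio) ? &rt_sched_class : &fair_sched_class */
	return effective_prio < MAX_RT_PRIO ? "rt_sched_class" : "fair_sched_class";
}

int main(void)
{
	printf("prio  10 -> %s\n", pick_class(10));	/* RT or PI-boosted task */
	printf("prio 120 -> %s\n", pick_class(120));	/* ordinary CFS task */
	return 0;
}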
@@ -6272,7 +6357,7 @@ recheck: | |||
6272 | * make sure no PI-waiters arrive (or leave) while we are | 6357 | * make sure no PI-waiters arrive (or leave) while we are |
6273 | * changing the priority of the task: | 6358 | * changing the priority of the task: |
6274 | */ | 6359 | */ |
6275 | spin_lock_irqsave(&p->pi_lock, flags); | 6360 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
6276 | /* | 6361 | /* |
6277 | * To be able to change p->policy safely, the appropriate | 6362 | * To be able to change p->policy safely, the appropriate |
6278 | * runqueue lock must be held. | 6363 | * runqueue lock must be held. |
@@ -6282,7 +6367,7 @@ recheck: | |||
6282 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { | 6367 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
6283 | policy = oldpolicy = -1; | 6368 | policy = oldpolicy = -1; |
6284 | __task_rq_unlock(rq); | 6369 | __task_rq_unlock(rq); |
6285 | spin_unlock_irqrestore(&p->pi_lock, flags); | 6370 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
6286 | goto recheck; | 6371 | goto recheck; |
6287 | } | 6372 | } |
6288 | update_rq_clock(rq); | 6373 | update_rq_clock(rq); |
@@ -6306,7 +6391,7 @@ recheck: | |||
6306 | check_class_changed(rq, p, prev_class, oldprio, running); | 6391 | check_class_changed(rq, p, prev_class, oldprio, running); |
6307 | } | 6392 | } |
6308 | __task_rq_unlock(rq); | 6393 | __task_rq_unlock(rq); |
6309 | spin_unlock_irqrestore(&p->pi_lock, flags); | 6394 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
6310 | 6395 | ||
6311 | rt_mutex_adjust_pi(p); | 6396 | rt_mutex_adjust_pi(p); |
6312 | 6397 | ||
@@ -6406,7 +6491,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) | |||
6406 | return -EINVAL; | 6491 | return -EINVAL; |
6407 | 6492 | ||
6408 | retval = -ESRCH; | 6493 | retval = -ESRCH; |
6409 | read_lock(&tasklist_lock); | 6494 | rcu_read_lock(); |
6410 | p = find_process_by_pid(pid); | 6495 | p = find_process_by_pid(pid); |
6411 | if (p) { | 6496 | if (p) { |
6412 | retval = security_task_getscheduler(p); | 6497 | retval = security_task_getscheduler(p); |
@@ -6414,7 +6499,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) | |||
6414 | retval = p->policy | 6499 | retval = p->policy |
6415 | | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); | 6500 | | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); |
6416 | } | 6501 | } |
6417 | read_unlock(&tasklist_lock); | 6502 | rcu_read_unlock(); |
6418 | return retval; | 6503 | return retval; |
6419 | } | 6504 | } |
6420 | 6505 | ||
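The read_lock(&tasklist_lock) → rcu_read_lock() conversions in this and the following syscalls share one rationale: the lookup only needs the task_struct to stay valid while it is being inspected, which an RCU read-side critical section provides without touching the global tasklist_lock. A hedged sketch of the pattern (find_process_by_pid() is sched.c's internal pid lookup used above):

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static int example_get_policy(pid_t pid)
{
	struct task_struct *p;
	int retval = -ESRCH;

	rcu_read_lock();			/* keeps the task_struct from being freed */
	p = find_process_by_pid(pid);		/* only valid inside the RCU section */
	if (p)
		retval = p->policy;
	rcu_read_unlock();			/* do not dereference p after this */

	return retval;
}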
@@ -6432,7 +6517,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) | |||
6432 | if (!param || pid < 0) | 6517 | if (!param || pid < 0) |
6433 | return -EINVAL; | 6518 | return -EINVAL; |
6434 | 6519 | ||
6435 | read_lock(&tasklist_lock); | 6520 | rcu_read_lock(); |
6436 | p = find_process_by_pid(pid); | 6521 | p = find_process_by_pid(pid); |
6437 | retval = -ESRCH; | 6522 | retval = -ESRCH; |
6438 | if (!p) | 6523 | if (!p) |
@@ -6443,7 +6528,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) | |||
6443 | goto out_unlock; | 6528 | goto out_unlock; |
6444 | 6529 | ||
6445 | lp.sched_priority = p->rt_priority; | 6530 | lp.sched_priority = p->rt_priority; |
6446 | read_unlock(&tasklist_lock); | 6531 | rcu_read_unlock(); |
6447 | 6532 | ||
6448 | /* | 6533 | /* |
6449 | * This one might sleep, we cannot do it with a spinlock held ... | 6534 | * This one might sleep, we cannot do it with a spinlock held ... |
@@ -6453,7 +6538,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) | |||
6453 | return retval; | 6538 | return retval; |
6454 | 6539 | ||
6455 | out_unlock: | 6540 | out_unlock: |
6456 | read_unlock(&tasklist_lock); | 6541 | rcu_read_unlock(); |
6457 | return retval; | 6542 | return retval; |
6458 | } | 6543 | } |
6459 | 6544 | ||
@@ -6464,22 +6549,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) | |||
6464 | int retval; | 6549 | int retval; |
6465 | 6550 | ||
6466 | get_online_cpus(); | 6551 | get_online_cpus(); |
6467 | read_lock(&tasklist_lock); | 6552 | rcu_read_lock(); |
6468 | 6553 | ||
6469 | p = find_process_by_pid(pid); | 6554 | p = find_process_by_pid(pid); |
6470 | if (!p) { | 6555 | if (!p) { |
6471 | read_unlock(&tasklist_lock); | 6556 | rcu_read_unlock(); |
6472 | put_online_cpus(); | 6557 | put_online_cpus(); |
6473 | return -ESRCH; | 6558 | return -ESRCH; |
6474 | } | 6559 | } |
6475 | 6560 | ||
6476 | /* | 6561 | /* Prevent p going away */ |
6477 | * It is not safe to call set_cpus_allowed with the | ||
6478 | * tasklist_lock held. We will bump the task_struct's | ||
6479 | * usage count and then drop tasklist_lock. | ||
6480 | */ | ||
6481 | get_task_struct(p); | 6562 | get_task_struct(p); |
6482 | read_unlock(&tasklist_lock); | 6563 | rcu_read_unlock(); |
6483 | 6564 | ||
6484 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { | 6565 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { |
6485 | retval = -ENOMEM; | 6566 | retval = -ENOMEM; |
@@ -6560,10 +6641,12 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, | |||
6560 | long sched_getaffinity(pid_t pid, struct cpumask *mask) | 6641 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
6561 | { | 6642 | { |
6562 | struct task_struct *p; | 6643 | struct task_struct *p; |
6644 | unsigned long flags; | ||
6645 | struct rq *rq; | ||
6563 | int retval; | 6646 | int retval; |
6564 | 6647 | ||
6565 | get_online_cpus(); | 6648 | get_online_cpus(); |
6566 | read_lock(&tasklist_lock); | 6649 | rcu_read_lock(); |
6567 | 6650 | ||
6568 | retval = -ESRCH; | 6651 | retval = -ESRCH; |
6569 | p = find_process_by_pid(pid); | 6652 | p = find_process_by_pid(pid); |
@@ -6574,10 +6657,12 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) | |||
6574 | if (retval) | 6657 | if (retval) |
6575 | goto out_unlock; | 6658 | goto out_unlock; |
6576 | 6659 | ||
6660 | rq = task_rq_lock(p, &flags); | ||
6577 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); | 6661 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
6662 | task_rq_unlock(rq, &flags); | ||
6578 | 6663 | ||
6579 | out_unlock: | 6664 | out_unlock: |
6580 | read_unlock(&tasklist_lock); | 6665 | rcu_read_unlock(); |
6581 | put_online_cpus(); | 6666 | put_online_cpus(); |
6582 | 6667 | ||
6583 | return retval; | 6668 | return retval; |
@@ -6632,7 +6717,7 @@ SYSCALL_DEFINE0(sched_yield) | |||
6632 | */ | 6717 | */ |
6633 | __release(rq->lock); | 6718 | __release(rq->lock); |
6634 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); | 6719 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
6635 | _raw_spin_unlock(&rq->lock); | 6720 | do_raw_spin_unlock(&rq->lock); |
6636 | preempt_enable_no_resched(); | 6721 | preempt_enable_no_resched(); |
6637 | 6722 | ||
6638 | schedule(); | 6723 | schedule(); |
@@ -6720,9 +6805,6 @@ EXPORT_SYMBOL(yield); | |||
6720 | /* | 6805 | /* |
6721 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so | 6806 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
6722 | * that process accounting knows that this is a task in IO wait state. | 6807 | * that process accounting knows that this is a task in IO wait state. |
6723 | * | ||
6724 | * But don't do that if it is a deliberate, throttling IO wait (this task | ||
6725 | * has set its backing_dev_info: the queue against which it should throttle) | ||
6726 | */ | 6808 | */ |
6727 | void __sched io_schedule(void) | 6809 | void __sched io_schedule(void) |
6728 | { | 6810 | { |
@@ -6815,6 +6897,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
6815 | { | 6897 | { |
6816 | struct task_struct *p; | 6898 | struct task_struct *p; |
6817 | unsigned int time_slice; | 6899 | unsigned int time_slice; |
6900 | unsigned long flags; | ||
6901 | struct rq *rq; | ||
6818 | int retval; | 6902 | int retval; |
6819 | struct timespec t; | 6903 | struct timespec t; |
6820 | 6904 | ||
@@ -6822,7 +6906,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
6822 | return -EINVAL; | 6906 | return -EINVAL; |
6823 | 6907 | ||
6824 | retval = -ESRCH; | 6908 | retval = -ESRCH; |
6825 | read_lock(&tasklist_lock); | 6909 | rcu_read_lock(); |
6826 | p = find_process_by_pid(pid); | 6910 | p = find_process_by_pid(pid); |
6827 | if (!p) | 6911 | if (!p) |
6828 | goto out_unlock; | 6912 | goto out_unlock; |
@@ -6831,15 +6915,17 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
6831 | if (retval) | 6915 | if (retval) |
6832 | goto out_unlock; | 6916 | goto out_unlock; |
6833 | 6917 | ||
6834 | time_slice = p->sched_class->get_rr_interval(p); | 6918 | rq = task_rq_lock(p, &flags); |
6919 | time_slice = p->sched_class->get_rr_interval(rq, p); | ||
6920 | task_rq_unlock(rq, &flags); | ||
6835 | 6921 | ||
6836 | read_unlock(&tasklist_lock); | 6922 | rcu_read_unlock(); |
6837 | jiffies_to_timespec(time_slice, &t); | 6923 | jiffies_to_timespec(time_slice, &t); |
6838 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; | 6924 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; |
6839 | return retval; | 6925 | return retval; |
6840 | 6926 | ||
6841 | out_unlock: | 6927 | out_unlock: |
6842 | read_unlock(&tasklist_lock); | 6928 | rcu_read_unlock(); |
6843 | return retval; | 6929 | return retval; |
6844 | } | 6930 | } |
6845 | 6931 | ||
@@ -6905,7 +6991,7 @@ void show_state_filter(unsigned long state_filter) | |||
6905 | /* | 6991 | /* |
6906 | * Only show locks if all tasks are dumped: | 6992 | * Only show locks if all tasks are dumped: |
6907 | */ | 6993 | */ |
6908 | if (state_filter == -1) | 6994 | if (!state_filter) |
6909 | debug_show_all_locks(); | 6995 | debug_show_all_locks(); |
6910 | } | 6996 | } |
6911 | 6997 | ||
@@ -6927,12 +7013,12 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
6927 | struct rq *rq = cpu_rq(cpu); | 7013 | struct rq *rq = cpu_rq(cpu); |
6928 | unsigned long flags; | 7014 | unsigned long flags; |
6929 | 7015 | ||
6930 | spin_lock_irqsave(&rq->lock, flags); | 7016 | raw_spin_lock_irqsave(&rq->lock, flags); |
6931 | 7017 | ||
6932 | __sched_fork(idle); | 7018 | __sched_fork(idle); |
7019 | idle->state = TASK_RUNNING; | ||
6933 | idle->se.exec_start = sched_clock(); | 7020 | idle->se.exec_start = sched_clock(); |
6934 | 7021 | ||
6935 | idle->prio = idle->normal_prio = MAX_PRIO; | ||
6936 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); | 7022 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
6937 | __set_task_cpu(idle, cpu); | 7023 | __set_task_cpu(idle, cpu); |
6938 | 7024 | ||
@@ -6940,7 +7026,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
6940 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 7026 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
6941 | idle->oncpu = 1; | 7027 | idle->oncpu = 1; |
6942 | #endif | 7028 | #endif |
6943 | spin_unlock_irqrestore(&rq->lock, flags); | 7029 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
6944 | 7030 | ||
6945 | /* Set the preempt count _outside_ the spinlocks! */ | 7031 | /* Set the preempt count _outside_ the spinlocks! */ |
6946 | #if defined(CONFIG_PREEMPT) | 7032 | #if defined(CONFIG_PREEMPT) |
@@ -6973,22 +7059,43 @@ cpumask_var_t nohz_cpu_mask; | |||
6973 | * | 7059 | * |
6974 | * This idea comes from the SD scheduler of Con Kolivas: | 7060 | * This idea comes from the SD scheduler of Con Kolivas: |
6975 | */ | 7061 | */ |
6976 | static inline void sched_init_granularity(void) | 7062 | static int get_update_sysctl_factor(void) |
6977 | { | 7063 | { |
6978 | unsigned int factor = 1 + ilog2(num_online_cpus()); | 7064 | unsigned int cpus = min_t(int, num_online_cpus(), 8); |
6979 | const unsigned long limit = 200000000; | 7065 | unsigned int factor; |
6980 | 7066 | ||
6981 | sysctl_sched_min_granularity *= factor; | 7067 | switch (sysctl_sched_tunable_scaling) { |
6982 | if (sysctl_sched_min_granularity > limit) | 7068 | case SCHED_TUNABLESCALING_NONE: |
6983 | sysctl_sched_min_granularity = limit; | 7069 | factor = 1; |
7070 | break; | ||
7071 | case SCHED_TUNABLESCALING_LINEAR: | ||
7072 | factor = cpus; | ||
7073 | break; | ||
7074 | case SCHED_TUNABLESCALING_LOG: | ||
7075 | default: | ||
7076 | factor = 1 + ilog2(cpus); | ||
7077 | break; | ||
7078 | } | ||
6984 | 7079 | ||
6985 | sysctl_sched_latency *= factor; | 7080 | return factor; |
6986 | if (sysctl_sched_latency > limit) | 7081 | } |
6987 | sysctl_sched_latency = limit; | 7082 | |
7083 | static void update_sysctl(void) | ||
7084 | { | ||
7085 | unsigned int factor = get_update_sysctl_factor(); | ||
6988 | 7086 | ||
6989 | sysctl_sched_wakeup_granularity *= factor; | 7087 | #define SET_SYSCTL(name) \ |
7088 | (sysctl_##name = (factor) * normalized_sysctl_##name) | ||
7089 | SET_SYSCTL(sched_min_granularity); | ||
7090 | SET_SYSCTL(sched_latency); | ||
7091 | SET_SYSCTL(sched_wakeup_granularity); | ||
7092 | SET_SYSCTL(sched_shares_ratelimit); | ||
7093 | #undef SET_SYSCTL | ||
7094 | } | ||
6990 | 7095 | ||
6991 | sysctl_sched_shares_ratelimit *= factor; | 7096 | static inline void sched_init_granularity(void) |
7097 | { | ||
7098 | update_sysctl(); | ||
6992 | } | 7099 | } |
6993 | 7100 | ||
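get_update_sysctl_factor() replaces the old unconditional 1 + ilog2(nr_cpus) scaling with a selectable policy (none, logarithmic, or linear), capped at 8 CPUs, and update_sysctl() rederives each tunable from a stored normalized_sysctl_* baseline instead of multiplying the live value in place, so the scaling can be recomputed later without compounding. A user-space sketch of the factor selection and rescaling, with an illustrative baseline:

#include <stdio.h>

enum tunable_scaling { SCALING_NONE, SCALING_LOG, SCALING_LINEAR };

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int scaling_factor(enum tunable_scaling mode, unsigned int ncpus)
{
	unsigned int cpus = ncpus < 8 ? ncpus : 8;	/* min_t(int, ncpus, 8) */

	switch (mode) {
	case SCALING_NONE:
		return 1;
	case SCALING_LINEAR:
		return cpus;
	case SCALING_LOG:
	default:
		return 1 + ilog2_u(cpus);
	}
}

int main(void)
{
	/* illustrative baseline only, not the kernel's actual default */
	const unsigned int normalized_sched_latency = 6000000;	/* ns */
	unsigned int cpus;

	for (cpus = 1; cpus <= 16; cpus *= 2) {
		unsigned int f = scaling_factor(SCALING_LOG, cpus);

		printf("%2u cpus: factor=%u sched_latency=%u\n",
		       cpus, f, f * normalized_sched_latency);
	}
	return 0;
}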
6994 | #ifdef CONFIG_SMP | 7101 | #ifdef CONFIG_SMP |
@@ -7024,8 +7131,24 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | |||
7024 | struct rq *rq; | 7131 | struct rq *rq; |
7025 | int ret = 0; | 7132 | int ret = 0; |
7026 | 7133 | ||
7134 | /* | ||
7135 | * Since we rely on wake-ups to migrate sleeping tasks, don't change | ||
7136 | * the ->cpus_allowed mask from under waking tasks, which would be | ||
7137 | * possible when we change rq->lock in ttwu(), so synchronize against | ||
7138 | * TASK_WAKING to avoid that. | ||
7139 | */ | ||
7140 | again: | ||
7141 | while (p->state == TASK_WAKING) | ||
7142 | cpu_relax(); | ||
7143 | |||
7027 | rq = task_rq_lock(p, &flags); | 7144 | rq = task_rq_lock(p, &flags); |
7028 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { | 7145 | |
7146 | if (p->state == TASK_WAKING) { | ||
7147 | task_rq_unlock(rq, &flags); | ||
7148 | goto again; | ||
7149 | } | ||
7150 | |||
7151 | if (!cpumask_intersects(new_mask, cpu_active_mask)) { | ||
7029 | ret = -EINVAL; | 7152 | ret = -EINVAL; |
7030 | goto out; | 7153 | goto out; |
7031 | } | 7154 | } |
@@ -7047,7 +7170,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | |||
7047 | if (cpumask_test_cpu(task_cpu(p), new_mask)) | 7170 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
7048 | goto out; | 7171 | goto out; |
7049 | 7172 | ||
7050 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { | 7173 | if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) { |
7051 | /* Need help from migration thread: drop lock and wait. */ | 7174 | /* Need help from migration thread: drop lock and wait. */ |
7052 | struct task_struct *mt = rq->migration_thread; | 7175 | struct task_struct *mt = rq->migration_thread; |
7053 | 7176 | ||
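The TASK_WAKING handling added above is the classic check, lock, re-check idiom: wait outside the lock for the transient state to clear, take the lock, and if the state reappeared in the meantime drop the lock and retry. A stripped-down user-space analogue of the same control flow (all names are illustrative; compile with -pthread):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

enum obj_state { STABLE, TRANSIENT };

struct obj {
	_Atomic int state;
	pthread_mutex_t lock;
};

/* operate on the object only once its state is stable under the lock */
void operate_when_stable(struct obj *o)
{
again:
	while (atomic_load(&o->state) == TRANSIENT)
		sched_yield();			/* the kernel spins with cpu_relax() */

	pthread_mutex_lock(&o->lock);
	if (atomic_load(&o->state) == TRANSIENT) {
		/* state was re-entered between the wait and the lock: retry */
		pthread_mutex_unlock(&o->lock);
		goto again;
	}
	/* state stays stable here as long as writers only change it under the lock */
	pthread_mutex_unlock(&o->lock);
}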
@@ -7080,7 +7203,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); | |||
7080 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | 7203 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
7081 | { | 7204 | { |
7082 | struct rq *rq_dest, *rq_src; | 7205 | struct rq *rq_dest, *rq_src; |
7083 | int ret = 0, on_rq; | 7206 | int ret = 0; |
7084 | 7207 | ||
7085 | if (unlikely(!cpu_active(dest_cpu))) | 7208 | if (unlikely(!cpu_active(dest_cpu))) |
7086 | return ret; | 7209 | return ret; |
@@ -7096,12 +7219,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
7096 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) | 7219 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
7097 | goto fail; | 7220 | goto fail; |
7098 | 7221 | ||
7099 | on_rq = p->se.on_rq; | 7222 | /* |
7100 | if (on_rq) | 7223 | * If we're not on a rq, the next wake-up will ensure we're |
7224 | * placed properly. | ||
7225 | */ | ||
7226 | if (p->se.on_rq) { | ||
7101 | deactivate_task(rq_src, p, 0); | 7227 | deactivate_task(rq_src, p, 0); |
7102 | 7228 | set_task_cpu(p, dest_cpu); | |
7103 | set_task_cpu(p, dest_cpu); | ||
7104 | if (on_rq) { | ||
7105 | activate_task(rq_dest, p, 0); | 7229 | activate_task(rq_dest, p, 0); |
7106 | check_preempt_curr(rq_dest, p, 0); | 7230 | check_preempt_curr(rq_dest, p, 0); |
7107 | } | 7231 | } |
@@ -7136,10 +7260,10 @@ static int migration_thread(void *data) | |||
7136 | struct migration_req *req; | 7260 | struct migration_req *req; |
7137 | struct list_head *head; | 7261 | struct list_head *head; |
7138 | 7262 | ||
7139 | spin_lock_irq(&rq->lock); | 7263 | raw_spin_lock_irq(&rq->lock); |
7140 | 7264 | ||
7141 | if (cpu_is_offline(cpu)) { | 7265 | if (cpu_is_offline(cpu)) { |
7142 | spin_unlock_irq(&rq->lock); | 7266 | raw_spin_unlock_irq(&rq->lock); |
7143 | break; | 7267 | break; |
7144 | } | 7268 | } |
7145 | 7269 | ||
@@ -7151,7 +7275,7 @@ static int migration_thread(void *data) | |||
7151 | head = &rq->migration_queue; | 7275 | head = &rq->migration_queue; |
7152 | 7276 | ||
7153 | if (list_empty(head)) { | 7277 | if (list_empty(head)) { |
7154 | spin_unlock_irq(&rq->lock); | 7278 | raw_spin_unlock_irq(&rq->lock); |
7155 | schedule(); | 7279 | schedule(); |
7156 | set_current_state(TASK_INTERRUPTIBLE); | 7280 | set_current_state(TASK_INTERRUPTIBLE); |
7157 | continue; | 7281 | continue; |
@@ -7160,14 +7284,14 @@ static int migration_thread(void *data) | |||
7160 | list_del_init(head->next); | 7284 | list_del_init(head->next); |
7161 | 7285 | ||
7162 | if (req->task != NULL) { | 7286 | if (req->task != NULL) { |
7163 | spin_unlock(&rq->lock); | 7287 | raw_spin_unlock(&rq->lock); |
7164 | __migrate_task(req->task, cpu, req->dest_cpu); | 7288 | __migrate_task(req->task, cpu, req->dest_cpu); |
7165 | } else if (likely(cpu == (badcpu = smp_processor_id()))) { | 7289 | } else if (likely(cpu == (badcpu = smp_processor_id()))) { |
7166 | req->dest_cpu = RCU_MIGRATION_GOT_QS; | 7290 | req->dest_cpu = RCU_MIGRATION_GOT_QS; |
7167 | spin_unlock(&rq->lock); | 7291 | raw_spin_unlock(&rq->lock); |
7168 | } else { | 7292 | } else { |
7169 | req->dest_cpu = RCU_MIGRATION_MUST_SYNC; | 7293 | req->dest_cpu = RCU_MIGRATION_MUST_SYNC; |
7170 | spin_unlock(&rq->lock); | 7294 | raw_spin_unlock(&rq->lock); |
7171 | WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); | 7295 | WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); |
7172 | } | 7296 | } |
7173 | local_irq_enable(); | 7297 | local_irq_enable(); |
@@ -7197,37 +7321,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
7197 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 7321 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
7198 | { | 7322 | { |
7199 | int dest_cpu; | 7323 | int dest_cpu; |
7200 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); | ||
7201 | 7324 | ||
7202 | again: | 7325 | again: |
7203 | /* Look for allowed, online CPU in same node. */ | 7326 | dest_cpu = select_fallback_rq(dead_cpu, p); |
7204 | for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) | ||
7205 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) | ||
7206 | goto move; | ||
7207 | |||
7208 | /* Any allowed, online CPU? */ | ||
7209 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); | ||
7210 | if (dest_cpu < nr_cpu_ids) | ||
7211 | goto move; | ||
7212 | |||
7213 | /* No more Mr. Nice Guy. */ | ||
7214 | if (dest_cpu >= nr_cpu_ids) { | ||
7215 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); | ||
7216 | dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); | ||
7217 | 7327 | ||
7218 | /* | ||
7219 | * Don't tell them about moving exiting tasks or | ||
7220 | * kernel threads (both mm NULL), since they never | ||
7221 | * leave kernel. | ||
7222 | */ | ||
7223 | if (p->mm && printk_ratelimit()) { | ||
7224 | printk(KERN_INFO "process %d (%s) no " | ||
7225 | "longer affine to cpu%d\n", | ||
7226 | task_pid_nr(p), p->comm, dead_cpu); | ||
7227 | } | ||
7228 | } | ||
7229 | |||
7230 | move: | ||
7231 | /* It can have affinity changed while we were choosing. */ | 7328 | /* It can have affinity changed while we were choosing. */ |
7232 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) | 7329 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) |
7233 | goto again; | 7330 | goto again; |
@@ -7242,7 +7339,7 @@ move: | |||
7242 | */ | 7339 | */ |
7243 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 7340 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
7244 | { | 7341 | { |
7245 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); | 7342 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); |
7246 | unsigned long flags; | 7343 | unsigned long flags; |
7247 | 7344 | ||
7248 | local_irq_save(flags); | 7345 | local_irq_save(flags); |
@@ -7290,14 +7387,14 @@ void sched_idle_next(void) | |||
7290 | * Strictly not necessary since rest of the CPUs are stopped by now | 7387 | * Strictly not necessary since rest of the CPUs are stopped by now |
7291 | * and interrupts disabled on the current cpu. | 7388 | * and interrupts disabled on the current cpu. |
7292 | */ | 7389 | */ |
7293 | spin_lock_irqsave(&rq->lock, flags); | 7390 | raw_spin_lock_irqsave(&rq->lock, flags); |
7294 | 7391 | ||
7295 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); | 7392 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); |
7296 | 7393 | ||
7297 | update_rq_clock(rq); | 7394 | update_rq_clock(rq); |
7298 | activate_task(rq, p, 0); | 7395 | activate_task(rq, p, 0); |
7299 | 7396 | ||
7300 | spin_unlock_irqrestore(&rq->lock, flags); | 7397 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7301 | } | 7398 | } |
7302 | 7399 | ||
7303 | /* | 7400 | /* |
@@ -7333,9 +7430,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) | |||
7333 | * that's OK. No task can be added to this CPU, so iteration is | 7430 | * that's OK. No task can be added to this CPU, so iteration is |
7334 | * fine. | 7431 | * fine. |
7335 | */ | 7432 | */ |
7336 | spin_unlock_irq(&rq->lock); | 7433 | raw_spin_unlock_irq(&rq->lock); |
7337 | move_task_off_dead_cpu(dead_cpu, p); | 7434 | move_task_off_dead_cpu(dead_cpu, p); |
7338 | spin_lock_irq(&rq->lock); | 7435 | raw_spin_lock_irq(&rq->lock); |
7339 | 7436 | ||
7340 | put_task_struct(p); | 7437 | put_task_struct(p); |
7341 | } | 7438 | } |
@@ -7376,17 +7473,16 @@ static struct ctl_table sd_ctl_dir[] = { | |||
7376 | .procname = "sched_domain", | 7473 | .procname = "sched_domain", |
7377 | .mode = 0555, | 7474 | .mode = 0555, |
7378 | }, | 7475 | }, |
7379 | {0, }, | 7476 | {} |
7380 | }; | 7477 | }; |
7381 | 7478 | ||
7382 | static struct ctl_table sd_ctl_root[] = { | 7479 | static struct ctl_table sd_ctl_root[] = { |
7383 | { | 7480 | { |
7384 | .ctl_name = CTL_KERN, | ||
7385 | .procname = "kernel", | 7481 | .procname = "kernel", |
7386 | .mode = 0555, | 7482 | .mode = 0555, |
7387 | .child = sd_ctl_dir, | 7483 | .child = sd_ctl_dir, |
7388 | }, | 7484 | }, |
7389 | {0, }, | 7485 | {} |
7390 | }; | 7486 | }; |
7391 | 7487 | ||
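The `{0, }` → `{}` terminators and the dropped .ctl_name member are part of the removal of binary sysctl numbers: tables now end with an entry whose .procname is NULL, and entries are matched by name alone. A hedged sketch of a table written in the new style (names and values are illustrative):

#include <linux/sysctl.h>

static int example_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}	/* terminator: .procname == NULL ends the table */
};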
7392 | static struct ctl_table *sd_alloc_ctl_entry(int n) | 7488 | static struct ctl_table *sd_alloc_ctl_entry(int n) |
@@ -7496,7 +7592,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu) | |||
7496 | static struct ctl_table_header *sd_sysctl_header; | 7592 | static struct ctl_table_header *sd_sysctl_header; |
7497 | static void register_sched_domain_sysctl(void) | 7593 | static void register_sched_domain_sysctl(void) |
7498 | { | 7594 | { |
7499 | int i, cpu_num = num_online_cpus(); | 7595 | int i, cpu_num = num_possible_cpus(); |
7500 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); | 7596 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); |
7501 | char buf[32]; | 7597 | char buf[32]; |
7502 | 7598 | ||
@@ -7506,7 +7602,7 @@ static void register_sched_domain_sysctl(void) | |||
7506 | if (entry == NULL) | 7602 | if (entry == NULL) |
7507 | return; | 7603 | return; |
7508 | 7604 | ||
7509 | for_each_online_cpu(i) { | 7605 | for_each_possible_cpu(i) { |
7510 | snprintf(buf, 32, "cpu%d", i); | 7606 | snprintf(buf, 32, "cpu%d", i); |
7511 | entry->procname = kstrdup(buf, GFP_KERNEL); | 7607 | entry->procname = kstrdup(buf, GFP_KERNEL); |
7512 | entry->mode = 0555; | 7608 | entry->mode = 0555; |
@@ -7602,13 +7698,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7602 | 7698 | ||
7603 | /* Update our root-domain */ | 7699 | /* Update our root-domain */ |
7604 | rq = cpu_rq(cpu); | 7700 | rq = cpu_rq(cpu); |
7605 | spin_lock_irqsave(&rq->lock, flags); | 7701 | raw_spin_lock_irqsave(&rq->lock, flags); |
7606 | if (rq->rd) { | 7702 | if (rq->rd) { |
7607 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | 7703 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
7608 | 7704 | ||
7609 | set_rq_online(rq); | 7705 | set_rq_online(rq); |
7610 | } | 7706 | } |
7611 | spin_unlock_irqrestore(&rq->lock, flags); | 7707 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7612 | break; | 7708 | break; |
7613 | 7709 | ||
7614 | #ifdef CONFIG_HOTPLUG_CPU | 7710 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -7633,14 +7729,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7633 | put_task_struct(rq->migration_thread); | 7729 | put_task_struct(rq->migration_thread); |
7634 | rq->migration_thread = NULL; | 7730 | rq->migration_thread = NULL; |
7635 | /* Idle task back to normal (off runqueue, low prio) */ | 7731 | /* Idle task back to normal (off runqueue, low prio) */ |
7636 | spin_lock_irq(&rq->lock); | 7732 | raw_spin_lock_irq(&rq->lock); |
7637 | update_rq_clock(rq); | 7733 | update_rq_clock(rq); |
7638 | deactivate_task(rq, rq->idle, 0); | 7734 | deactivate_task(rq, rq->idle, 0); |
7639 | rq->idle->static_prio = MAX_PRIO; | ||
7640 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); | 7735 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); |
7641 | rq->idle->sched_class = &idle_sched_class; | 7736 | rq->idle->sched_class = &idle_sched_class; |
7642 | migrate_dead_tasks(cpu); | 7737 | migrate_dead_tasks(cpu); |
7643 | spin_unlock_irq(&rq->lock); | 7738 | raw_spin_unlock_irq(&rq->lock); |
7644 | cpuset_unlock(); | 7739 | cpuset_unlock(); |
7645 | migrate_nr_uninterruptible(rq); | 7740 | migrate_nr_uninterruptible(rq); |
7646 | BUG_ON(rq->nr_running != 0); | 7741 | BUG_ON(rq->nr_running != 0); |
@@ -7650,30 +7745,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7650 | * they didn't take sched_hotcpu_mutex. Just wake up | 7745 | * they didn't take sched_hotcpu_mutex. Just wake up |
7651 | * the requestors. | 7746 | * the requestors. |
7652 | */ | 7747 | */ |
7653 | spin_lock_irq(&rq->lock); | 7748 | raw_spin_lock_irq(&rq->lock); |
7654 | while (!list_empty(&rq->migration_queue)) { | 7749 | while (!list_empty(&rq->migration_queue)) { |
7655 | struct migration_req *req; | 7750 | struct migration_req *req; |
7656 | 7751 | ||
7657 | req = list_entry(rq->migration_queue.next, | 7752 | req = list_entry(rq->migration_queue.next, |
7658 | struct migration_req, list); | 7753 | struct migration_req, list); |
7659 | list_del_init(&req->list); | 7754 | list_del_init(&req->list); |
7660 | spin_unlock_irq(&rq->lock); | 7755 | raw_spin_unlock_irq(&rq->lock); |
7661 | complete(&req->done); | 7756 | complete(&req->done); |
7662 | spin_lock_irq(&rq->lock); | 7757 | raw_spin_lock_irq(&rq->lock); |
7663 | } | 7758 | } |
7664 | spin_unlock_irq(&rq->lock); | 7759 | raw_spin_unlock_irq(&rq->lock); |
7665 | break; | 7760 | break; |
7666 | 7761 | ||
7667 | case CPU_DYING: | 7762 | case CPU_DYING: |
7668 | case CPU_DYING_FROZEN: | 7763 | case CPU_DYING_FROZEN: |
7669 | /* Update our root-domain */ | 7764 | /* Update our root-domain */ |
7670 | rq = cpu_rq(cpu); | 7765 | rq = cpu_rq(cpu); |
7671 | spin_lock_irqsave(&rq->lock, flags); | 7766 | raw_spin_lock_irqsave(&rq->lock, flags); |
7672 | if (rq->rd) { | 7767 | if (rq->rd) { |
7673 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | 7768 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
7674 | set_rq_offline(rq); | 7769 | set_rq_offline(rq); |
7675 | } | 7770 | } |
7676 | spin_unlock_irqrestore(&rq->lock, flags); | 7771 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7677 | break; | 7772 | break; |
7678 | #endif | 7773 | #endif |
7679 | } | 7774 | } |
@@ -7710,6 +7805,16 @@ early_initcall(migration_init); | |||
7710 | 7805 | ||
7711 | #ifdef CONFIG_SCHED_DEBUG | 7806 | #ifdef CONFIG_SCHED_DEBUG |
7712 | 7807 | ||
7808 | static __read_mostly int sched_domain_debug_enabled; | ||
7809 | |||
7810 | static int __init sched_domain_debug_setup(char *str) | ||
7811 | { | ||
7812 | sched_domain_debug_enabled = 1; | ||
7813 | |||
7814 | return 0; | ||
7815 | } | ||
7816 | early_param("sched_debug", sched_domain_debug_setup); | ||
7817 | |||
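early_param() registers a handler that runs while the boot command line is parsed, long before initcalls, which is why a bare flag such as sched_domain_debug_enabled is the natural shape here. A hedged sketch of the same pattern for a hypothetical option:

#include <linux/cache.h>
#include <linux/init.h>

static __read_mostly int example_feature_enabled;

/* "example_debug" is a hypothetical boot option, parsed in early boot */
static int __init example_debug_setup(char *str)
{
	example_feature_enabled = 1;

	return 0;	/* 0: option consumed without error */
}
early_param("example_debug", example_debug_setup);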
7713 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 7818 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
7714 | struct cpumask *groupmask) | 7819 | struct cpumask *groupmask) |
7715 | { | 7820 | { |
@@ -7796,6 +7901,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
7796 | cpumask_var_t groupmask; | 7901 | cpumask_var_t groupmask; |
7797 | int level = 0; | 7902 | int level = 0; |
7798 | 7903 | ||
7904 | if (!sched_domain_debug_enabled) | ||
7905 | return; | ||
7906 | |||
7799 | if (!sd) { | 7907 | if (!sd) { |
7800 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); | 7908 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); |
7801 | return; | 7909 | return; |
@@ -7875,6 +7983,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
7875 | 7983 | ||
7876 | static void free_rootdomain(struct root_domain *rd) | 7984 | static void free_rootdomain(struct root_domain *rd) |
7877 | { | 7985 | { |
7986 | synchronize_sched(); | ||
7987 | |||
7878 | cpupri_cleanup(&rd->cpupri); | 7988 | cpupri_cleanup(&rd->cpupri); |
7879 | 7989 | ||
7880 | free_cpumask_var(rd->rto_mask); | 7990 | free_cpumask_var(rd->rto_mask); |
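The synchronize_sched() added here is the usual "wait for a grace period, then free" step: other CPUs may still be dereferencing the old root domain from preempt-disabled scheduler paths, and the grace period guarantees all of them have finished before the cpumasks and the structure itself are released. A simplified sketch of the idiom (publication of the new pointer is reduced to a plain store here; the scheduler does it under rq->lock):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_domain {
	int id;
	/* ... */
};

static struct example_domain *current_domain;

static void example_replace_domain(struct example_domain *new)
{
	struct example_domain *old = current_domain;

	current_domain = new;	/* preempt-disabled readers may still hold 'old' */
	synchronize_sched();	/* wait until all such readers have finished */
	kfree(old);		/* now nothing can still reference it */
}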
@@ -7888,7 +7998,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
7888 | struct root_domain *old_rd = NULL; | 7998 | struct root_domain *old_rd = NULL; |
7889 | unsigned long flags; | 7999 | unsigned long flags; |
7890 | 8000 | ||
7891 | spin_lock_irqsave(&rq->lock, flags); | 8001 | raw_spin_lock_irqsave(&rq->lock, flags); |
7892 | 8002 | ||
7893 | if (rq->rd) { | 8003 | if (rq->rd) { |
7894 | old_rd = rq->rd; | 8004 | old_rd = rq->rd; |
@@ -7914,7 +8024,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
7914 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) | 8024 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) |
7915 | set_rq_online(rq); | 8025 | set_rq_online(rq); |
7916 | 8026 | ||
7917 | spin_unlock_irqrestore(&rq->lock, flags); | 8027 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
7918 | 8028 | ||
7919 | if (old_rd) | 8029 | if (old_rd) |
7920 | free_rootdomain(old_rd); | 8030 | free_rootdomain(old_rd); |
@@ -8015,6 +8125,7 @@ static cpumask_var_t cpu_isolated_map; | |||
8015 | /* Setup the mask of cpus configured for isolated domains */ | 8125 | /* Setup the mask of cpus configured for isolated domains */ |
8016 | static int __init isolated_cpu_setup(char *str) | 8126 | static int __init isolated_cpu_setup(char *str) |
8017 | { | 8127 | { |
8128 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | ||
8018 | cpulist_parse(str, cpu_isolated_map); | 8129 | cpulist_parse(str, cpu_isolated_map); |
8019 | return 1; | 8130 | return 1; |
8020 | } | 8131 | } |
@@ -8851,7 +8962,7 @@ static int build_sched_domains(const struct cpumask *cpu_map) | |||
8851 | return __build_sched_domains(cpu_map, NULL); | 8962 | return __build_sched_domains(cpu_map, NULL); |
8852 | } | 8963 | } |
8853 | 8964 | ||
8854 | static struct cpumask *doms_cur; /* current sched domains */ | 8965 | static cpumask_var_t *doms_cur; /* current sched domains */ |
8855 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 8966 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
8856 | static struct sched_domain_attr *dattr_cur; | 8967 | static struct sched_domain_attr *dattr_cur; |
8857 | /* attributes of custom domains in 'doms_cur' */ | 8968 | /* attributes of custom domains in 'doms_cur' */ |
@@ -8873,6 +8984,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void) | |||
8873 | return 0; | 8984 | return 0; |
8874 | } | 8985 | } |
8875 | 8986 | ||
8987 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms) | ||
8988 | { | ||
8989 | int i; | ||
8990 | cpumask_var_t *doms; | ||
8991 | |||
8992 | doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); | ||
8993 | if (!doms) | ||
8994 | return NULL; | ||
8995 | for (i = 0; i < ndoms; i++) { | ||
8996 | if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { | ||
8997 | free_sched_domains(doms, i); | ||
8998 | return NULL; | ||
8999 | } | ||
9000 | } | ||
9001 | return doms; | ||
9002 | } | ||
9003 | |||
9004 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) | ||
9005 | { | ||
9006 | unsigned int i; | ||
9007 | for (i = 0; i < ndoms; i++) | ||
9008 | free_cpumask_var(doms[i]); | ||
9009 | kfree(doms); | ||
9010 | } | ||
9011 | |||
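alloc_sched_domains()/free_sched_domains() hide from callers (cpusets being the main one) whether cpumasks are on- or off-stack, which is what lets doms_cur become a cpumask_var_t array below. A hedged sketch of how a caller builds such an array and hands it to partition_sched_domains(), which then owns and eventually frees it:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/sched.h>

/* build a single-domain partition covering 'span'; caller holds the hotplug lock */
static int example_repartition(const struct cpumask *span)
{
	cpumask_var_t *doms;

	doms = alloc_sched_domains(1);
	if (!doms)
		return -ENOMEM;

	cpumask_copy(doms[0], span);
	/* partition_sched_domains() takes ownership and frees doms later */
	partition_sched_domains(1, doms, NULL);

	return 0;
}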
8876 | /* | 9012 | /* |
8877 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | 9013 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
8878 | * For now this just excludes isolated cpus, but could be used to | 9014 | * For now this just excludes isolated cpus, but could be used to |
@@ -8884,12 +9020,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map) | |||
8884 | 9020 | ||
8885 | arch_update_cpu_topology(); | 9021 | arch_update_cpu_topology(); |
8886 | ndoms_cur = 1; | 9022 | ndoms_cur = 1; |
8887 | doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); | 9023 | doms_cur = alloc_sched_domains(ndoms_cur); |
8888 | if (!doms_cur) | 9024 | if (!doms_cur) |
8889 | doms_cur = fallback_doms; | 9025 | doms_cur = &fallback_doms; |
8890 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); | 9026 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
8891 | dattr_cur = NULL; | 9027 | dattr_cur = NULL; |
8892 | err = build_sched_domains(doms_cur); | 9028 | err = build_sched_domains(doms_cur[0]); |
8893 | register_sched_domain_sysctl(); | 9029 | register_sched_domain_sysctl(); |
8894 | 9030 | ||
8895 | return err; | 9031 | return err; |
@@ -8939,19 +9075,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
8939 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 9075 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
8940 | * It destroys each deleted domain and builds each new domain. | 9076 | * It destroys each deleted domain and builds each new domain. |
8941 | * | 9077 | * |
8942 | * 'doms_new' is an array of cpumask's of length 'ndoms_new'. | 9078 | * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. |
8943 | * The masks don't intersect (don't overlap.) We should setup one | 9079 | * The masks don't intersect (don't overlap.) We should setup one |
8944 | * sched domain for each mask. CPUs not in any of the cpumasks will | 9080 | * sched domain for each mask. CPUs not in any of the cpumasks will |
8945 | * not be load balanced. If the same cpumask appears both in the | 9081 | * not be load balanced. If the same cpumask appears both in the |
8946 | * current 'doms_cur' domains and in the new 'doms_new', we can leave | 9082 | * current 'doms_cur' domains and in the new 'doms_new', we can leave |
8947 | * it as it is. | 9083 | * it as it is. |
8948 | * | 9084 | * |
8949 | * The passed in 'doms_new' should be kmalloc'd. This routine takes | 9085 | * The passed in 'doms_new' should be allocated using |
8950 | * ownership of it and will kfree it when done with it. If the caller | 9086 | * alloc_sched_domains. This routine takes ownership of it and will |
8951 | * failed the kmalloc call, then it can pass in doms_new == NULL && | 9087 | * free_sched_domains it when done with it. If the caller failed the |
8952 | * ndoms_new == 1, and partition_sched_domains() will fallback to | 9088 | * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, |
8953 | * the single partition 'fallback_doms', it also forces the domains | 9089 | * and partition_sched_domains() will fallback to the single partition |
8954 | * to be rebuilt. | 9090 | * 'fallback_doms', it also forces the domains to be rebuilt. |
8955 | * | 9091 | * |
8956 | * If doms_new == NULL it will be replaced with cpu_online_mask. | 9092 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
8957 | * ndoms_new == 0 is a special case for destroying existing domains, | 9093 | * ndoms_new == 0 is a special case for destroying existing domains, |
@@ -8959,8 +9095,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
8959 | * | 9095 | * |
8960 | * Call with hotplug lock held | 9096 | * Call with hotplug lock held |
8961 | */ | 9097 | */ |
8962 | /* FIXME: Change to struct cpumask *doms_new[] */ | 9098 | void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
8963 | void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
8964 | struct sched_domain_attr *dattr_new) | 9099 | struct sched_domain_attr *dattr_new) |
8965 | { | 9100 | { |
8966 | int i, j, n; | 9101 | int i, j, n; |
@@ -8979,40 +9114,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | |||
8979 | /* Destroy deleted domains */ | 9114 | /* Destroy deleted domains */ |
8980 | for (i = 0; i < ndoms_cur; i++) { | 9115 | for (i = 0; i < ndoms_cur; i++) { |
8981 | for (j = 0; j < n && !new_topology; j++) { | 9116 | for (j = 0; j < n && !new_topology; j++) { |
8982 | if (cpumask_equal(&doms_cur[i], &doms_new[j]) | 9117 | if (cpumask_equal(doms_cur[i], doms_new[j]) |
8983 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 9118 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
8984 | goto match1; | 9119 | goto match1; |
8985 | } | 9120 | } |
8986 | /* no match - a current sched domain not in new doms_new[] */ | 9121 | /* no match - a current sched domain not in new doms_new[] */ |
8987 | detach_destroy_domains(doms_cur + i); | 9122 | detach_destroy_domains(doms_cur[i]); |
8988 | match1: | 9123 | match1: |
8989 | ; | 9124 | ; |
8990 | } | 9125 | } |
8991 | 9126 | ||
8992 | if (doms_new == NULL) { | 9127 | if (doms_new == NULL) { |
8993 | ndoms_cur = 0; | 9128 | ndoms_cur = 0; |
8994 | doms_new = fallback_doms; | 9129 | doms_new = &fallback_doms; |
8995 | cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); | 9130 | cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); |
8996 | WARN_ON_ONCE(dattr_new); | 9131 | WARN_ON_ONCE(dattr_new); |
8997 | } | 9132 | } |
8998 | 9133 | ||
8999 | /* Build new domains */ | 9134 | /* Build new domains */ |
9000 | for (i = 0; i < ndoms_new; i++) { | 9135 | for (i = 0; i < ndoms_new; i++) { |
9001 | for (j = 0; j < ndoms_cur && !new_topology; j++) { | 9136 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
9002 | if (cpumask_equal(&doms_new[i], &doms_cur[j]) | 9137 | if (cpumask_equal(doms_new[i], doms_cur[j]) |
9003 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 9138 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
9004 | goto match2; | 9139 | goto match2; |
9005 | } | 9140 | } |
9006 | /* no match - add a new doms_new */ | 9141 | /* no match - add a new doms_new */ |
9007 | __build_sched_domains(doms_new + i, | 9142 | __build_sched_domains(doms_new[i], |
9008 | dattr_new ? dattr_new + i : NULL); | 9143 | dattr_new ? dattr_new + i : NULL); |
9009 | match2: | 9144 | match2: |
9010 | ; | 9145 | ; |
9011 | } | 9146 | } |
9012 | 9147 | ||
9013 | /* Remember the new sched domains */ | 9148 | /* Remember the new sched domains */ |
9014 | if (doms_cur != fallback_doms) | 9149 | if (doms_cur != &fallback_doms) |
9015 | kfree(doms_cur); | 9150 | free_sched_domains(doms_cur, ndoms_cur); |
9016 | kfree(dattr_cur); /* kfree(NULL) is safe */ | 9151 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
9017 | doms_cur = doms_new; | 9152 | doms_cur = doms_new; |
9018 | dattr_cur = dattr_new; | 9153 | dattr_cur = dattr_new; |
@@ -9123,8 +9258,10 @@ static int update_sched_domains(struct notifier_block *nfb, | |||
9123 | switch (action) { | 9258 | switch (action) { |
9124 | case CPU_ONLINE: | 9259 | case CPU_ONLINE: |
9125 | case CPU_ONLINE_FROZEN: | 9260 | case CPU_ONLINE_FROZEN: |
9126 | case CPU_DEAD: | 9261 | case CPU_DOWN_PREPARE: |
9127 | case CPU_DEAD_FROZEN: | 9262 | case CPU_DOWN_PREPARE_FROZEN: |
9263 | case CPU_DOWN_FAILED: | ||
9264 | case CPU_DOWN_FAILED_FROZEN: | ||
9128 | partition_sched_domains(1, NULL, NULL); | 9265 | partition_sched_domains(1, NULL, NULL); |
9129 | return NOTIFY_OK; | 9266 | return NOTIFY_OK; |
9130 | 9267 | ||
@@ -9171,7 +9308,7 @@ void __init sched_init_smp(void) | |||
9171 | #endif | 9308 | #endif |
9172 | get_online_cpus(); | 9309 | get_online_cpus(); |
9173 | mutex_lock(&sched_domains_mutex); | 9310 | mutex_lock(&sched_domains_mutex); |
9174 | arch_init_sched_domains(cpu_online_mask); | 9311 | arch_init_sched_domains(cpu_active_mask); |
9175 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); | 9312 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
9176 | if (cpumask_empty(non_isolated_cpus)) | 9313 | if (cpumask_empty(non_isolated_cpus)) |
9177 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); | 9314 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
@@ -9244,13 +9381,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) | |||
9244 | #ifdef CONFIG_SMP | 9381 | #ifdef CONFIG_SMP |
9245 | rt_rq->rt_nr_migratory = 0; | 9382 | rt_rq->rt_nr_migratory = 0; |
9246 | rt_rq->overloaded = 0; | 9383 | rt_rq->overloaded = 0; |
9247 | plist_head_init(&rt_rq->pushable_tasks, &rq->lock); | 9384 | plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock); |
9248 | #endif | 9385 | #endif |
9249 | 9386 | ||
9250 | rt_rq->rt_time = 0; | 9387 | rt_rq->rt_time = 0; |
9251 | rt_rq->rt_throttled = 0; | 9388 | rt_rq->rt_throttled = 0; |
9252 | rt_rq->rt_runtime = 0; | 9389 | rt_rq->rt_runtime = 0; |
9253 | spin_lock_init(&rt_rq->rt_runtime_lock); | 9390 | raw_spin_lock_init(&rt_rq->rt_runtime_lock); |
9254 | 9391 | ||
9255 | #ifdef CONFIG_RT_GROUP_SCHED | 9392 | #ifdef CONFIG_RT_GROUP_SCHED |
9256 | rt_rq->rt_nr_boosted = 0; | 9393 | rt_rq->rt_nr_boosted = 0; |
@@ -9334,10 +9471,6 @@ void __init sched_init(void) | |||
9334 | #ifdef CONFIG_CPUMASK_OFFSTACK | 9471 | #ifdef CONFIG_CPUMASK_OFFSTACK |
9335 | alloc_size += num_possible_cpus() * cpumask_size(); | 9472 | alloc_size += num_possible_cpus() * cpumask_size(); |
9336 | #endif | 9473 | #endif |
9337 | /* | ||
9338 | * As sched_init() is called before page_alloc is setup, | ||
9339 | * we use alloc_bootmem(). | ||
9340 | */ | ||
9341 | if (alloc_size) { | 9474 | if (alloc_size) { |
9342 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); | 9475 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); |
9343 | 9476 | ||
@@ -9406,11 +9539,15 @@ void __init sched_init(void) | |||
9406 | #endif /* CONFIG_USER_SCHED */ | 9539 | #endif /* CONFIG_USER_SCHED */ |
9407 | #endif /* CONFIG_GROUP_SCHED */ | 9540 | #endif /* CONFIG_GROUP_SCHED */ |
9408 | 9541 | ||
9542 | #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP | ||
9543 | update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), | ||
9544 | __alignof__(unsigned long)); | ||
9545 | #endif | ||
9409 | for_each_possible_cpu(i) { | 9546 | for_each_possible_cpu(i) { |
9410 | struct rq *rq; | 9547 | struct rq *rq; |
9411 | 9548 | ||
9412 | rq = cpu_rq(i); | 9549 | rq = cpu_rq(i); |
9413 | spin_lock_init(&rq->lock); | 9550 | raw_spin_lock_init(&rq->lock); |
9414 | rq->nr_running = 0; | 9551 | rq->nr_running = 0; |
9415 | rq->calc_load_active = 0; | 9552 | rq->calc_load_active = 0; |
9416 | rq->calc_load_update = jiffies + LOAD_FREQ; | 9553 | rq->calc_load_update = jiffies + LOAD_FREQ; |
@@ -9488,6 +9625,8 @@ void __init sched_init(void) | |||
9488 | rq->cpu = i; | 9625 | rq->cpu = i; |
9489 | rq->online = 0; | 9626 | rq->online = 0; |
9490 | rq->migration_thread = NULL; | 9627 | rq->migration_thread = NULL; |
9628 | rq->idle_stamp = 0; | ||
9629 | rq->avg_idle = 2*sysctl_sched_migration_cost; | ||
9491 | INIT_LIST_HEAD(&rq->migration_queue); | 9630 | INIT_LIST_HEAD(&rq->migration_queue); |
9492 | rq_attach_root(rq, &def_root_domain); | 9631 | rq_attach_root(rq, &def_root_domain); |
9493 | #endif | 9632 | #endif |
@@ -9506,7 +9645,7 @@ void __init sched_init(void) | |||
9506 | #endif | 9645 | #endif |
9507 | 9646 | ||
9508 | #ifdef CONFIG_RT_MUTEXES | 9647 | #ifdef CONFIG_RT_MUTEXES |
9509 | plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); | 9648 | plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); |
9510 | #endif | 9649 | #endif |
9511 | 9650 | ||
9512 | /* | 9651 | /* |
@@ -9531,13 +9670,15 @@ void __init sched_init(void) | |||
9531 | current->sched_class = &fair_sched_class; | 9670 | current->sched_class = &fair_sched_class; |
9532 | 9671 | ||
9533 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | 9672 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ |
9534 | alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); | 9673 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); |
9535 | #ifdef CONFIG_SMP | 9674 | #ifdef CONFIG_SMP |
9536 | #ifdef CONFIG_NO_HZ | 9675 | #ifdef CONFIG_NO_HZ |
9537 | alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); | 9676 | zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); |
9538 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); | 9677 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); |
9539 | #endif | 9678 | #endif |
9540 | alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); | 9679 | /* May be allocated at isolcpus cmdline parse time */ |
9680 | if (cpu_isolated_map == NULL) | ||
9681 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); | ||
9541 | #endif /* SMP */ | 9682 | #endif /* SMP */ |
9542 | 9683 | ||
9543 | perf_event_init(); | 9684 | perf_event_init(); |
@@ -9548,7 +9689,7 @@ void __init sched_init(void) | |||
9548 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP | 9689 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP |
9549 | static inline int preempt_count_equals(int preempt_offset) | 9690 | static inline int preempt_count_equals(int preempt_offset) |
9550 | { | 9691 | { |
9551 | int nested = preempt_count() & ~PREEMPT_ACTIVE; | 9692 | int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); |
9552 | 9693 | ||
9553 | return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); | 9694 | return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); |
9554 | } | 9695 | } |
@@ -9629,13 +9770,13 @@ void normalize_rt_tasks(void) | |||
9629 | continue; | 9770 | continue; |
9630 | } | 9771 | } |
9631 | 9772 | ||
9632 | spin_lock(&p->pi_lock); | 9773 | raw_spin_lock(&p->pi_lock); |
9633 | rq = __task_rq_lock(p); | 9774 | rq = __task_rq_lock(p); |
9634 | 9775 | ||
9635 | normalize_task(rq, p); | 9776 | normalize_task(rq, p); |
9636 | 9777 | ||
9637 | __task_rq_unlock(rq); | 9778 | __task_rq_unlock(rq); |
9638 | spin_unlock(&p->pi_lock); | 9779 | raw_spin_unlock(&p->pi_lock); |
9639 | } while_each_thread(g, p); | 9780 | } while_each_thread(g, p); |
9640 | 9781 | ||
9641 | read_unlock_irqrestore(&tasklist_lock, flags); | 9782 | read_unlock_irqrestore(&tasklist_lock, flags); |
@@ -9731,13 +9872,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
9731 | se = kzalloc_node(sizeof(struct sched_entity), | 9872 | se = kzalloc_node(sizeof(struct sched_entity), |
9732 | GFP_KERNEL, cpu_to_node(i)); | 9873 | GFP_KERNEL, cpu_to_node(i)); |
9733 | if (!se) | 9874 | if (!se) |
9734 | goto err; | 9875 | goto err_free_rq; |
9735 | 9876 | ||
9736 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); | 9877 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); |
9737 | } | 9878 | } |
9738 | 9879 | ||
9739 | return 1; | 9880 | return 1; |
9740 | 9881 | ||
9882 | err_free_rq: | ||
9883 | kfree(cfs_rq); | ||
9741 | err: | 9884 | err: |
9742 | return 0; | 9885 | return 0; |
9743 | } | 9886 | } |
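The new err_free_rq label closes a leak in the allocation loop: when the per-cpu sched_entity allocation fails, the cfs_rq allocated just before it has to be freed before bailing out. The general allocate-then-unwind idiom, as a small user-space sketch:

#include <stdlib.h>

struct pair {
	void *a;
	void *b;
};

/* allocate two objects; on partial failure, free what was already allocated */
static int alloc_pair(struct pair *p)
{
	p->a = malloc(128);
	if (!p->a)
		goto err;

	p->b = malloc(128);
	if (!p->b)
		goto err_free_a;	/* mirrors err_free_rq above */

	return 0;

err_free_a:
	free(p->a);
err:
	return -1;
}

int main(void)
{
	struct pair p;

	if (alloc_pair(&p))
		return 1;
	free(p.b);
	free(p.a);
	return 0;
}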
@@ -9819,13 +9962,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
9819 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), | 9962 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
9820 | GFP_KERNEL, cpu_to_node(i)); | 9963 | GFP_KERNEL, cpu_to_node(i)); |
9821 | if (!rt_se) | 9964 | if (!rt_se) |
9822 | goto err; | 9965 | goto err_free_rq; |
9823 | 9966 | ||
9824 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); | 9967 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); |
9825 | } | 9968 | } |
9826 | 9969 | ||
9827 | return 1; | 9970 | return 1; |
9828 | 9971 | ||
9972 | err_free_rq: | ||
9973 | kfree(rt_rq); | ||
9829 | err: | 9974 | err: |
9830 | return 0; | 9975 | return 0; |
9831 | } | 9976 | } |
@@ -9959,7 +10104,7 @@ void sched_move_task(struct task_struct *tsk) | |||
9959 | 10104 | ||
9960 | #ifdef CONFIG_FAIR_GROUP_SCHED | 10105 | #ifdef CONFIG_FAIR_GROUP_SCHED |
9961 | if (tsk->sched_class->moved_group) | 10106 | if (tsk->sched_class->moved_group) |
9962 | tsk->sched_class->moved_group(tsk); | 10107 | tsk->sched_class->moved_group(tsk, on_rq); |
9963 | #endif | 10108 | #endif |
9964 | 10109 | ||
9965 | if (unlikely(running)) | 10110 | if (unlikely(running)) |
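The moved_group() hook now also receives whether the task was queued on a runqueue at the time of the move, so the fair class can decide how to re-normalize the task's placement. A sketch of the corresponding prototype change, assuming the sched_class layout of this kernel; the structure shown is illustrative, not the full sched_class definition:

    struct task_struct;

    struct demo_sched_class {
            /* old hook:  void (*moved_group)(struct task_struct *p); */
            void (*moved_group)(struct task_struct *p, int on_rq);
    };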
@@ -9994,9 +10139,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares) | |||
9994 | struct rq *rq = cfs_rq->rq; | 10139 | struct rq *rq = cfs_rq->rq; |
9995 | unsigned long flags; | 10140 | unsigned long flags; |
9996 | 10141 | ||
9997 | spin_lock_irqsave(&rq->lock, flags); | 10142 | raw_spin_lock_irqsave(&rq->lock, flags); |
9998 | __set_se_shares(se, shares); | 10143 | __set_se_shares(se, shares); |
9999 | spin_unlock_irqrestore(&rq->lock, flags); | 10144 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
10000 | } | 10145 | } |
10001 | 10146 | ||
10002 | static DEFINE_MUTEX(shares_mutex); | 10147 | static DEFINE_MUTEX(shares_mutex); |
@@ -10181,18 +10326,18 @@ static int tg_set_bandwidth(struct task_group *tg, | |||
10181 | if (err) | 10326 | if (err) |
10182 | goto unlock; | 10327 | goto unlock; |
10183 | 10328 | ||
10184 | spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); | 10329 | raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
10185 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); | 10330 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
10186 | tg->rt_bandwidth.rt_runtime = rt_runtime; | 10331 | tg->rt_bandwidth.rt_runtime = rt_runtime; |
10187 | 10332 | ||
10188 | for_each_possible_cpu(i) { | 10333 | for_each_possible_cpu(i) { |
10189 | struct rt_rq *rt_rq = tg->rt_rq[i]; | 10334 | struct rt_rq *rt_rq = tg->rt_rq[i]; |
10190 | 10335 | ||
10191 | spin_lock(&rt_rq->rt_runtime_lock); | 10336 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
10192 | rt_rq->rt_runtime = rt_runtime; | 10337 | rt_rq->rt_runtime = rt_runtime; |
10193 | spin_unlock(&rt_rq->rt_runtime_lock); | 10338 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
10194 | } | 10339 | } |
10195 | spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); | 10340 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
10196 | unlock: | 10341 | unlock: |
10197 | read_unlock(&tasklist_lock); | 10342 | read_unlock(&tasklist_lock); |
10198 | mutex_unlock(&rt_constraints_mutex); | 10343 | mutex_unlock(&rt_constraints_mutex); |
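Beyond the raw-lock conversion, this hunk also shows the nesting convention the file documents elsewhere: the group-wide bandwidth lock is taken with interrupts disabled at the outer level, and each per-cpu rt_runtime_lock nests inside it as a plain raw_spin_lock(). A minimal sketch of that ordering with illustrative lock names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_RAW_SPINLOCK(demo_outer_lock);    /* group-wide lock */
    static DEFINE_RAW_SPINLOCK(demo_inner_lock);    /* per-cpu lock */
    static u64 demo_runtime;

    static void demo_propagate_runtime(u64 runtime)
    {
            raw_spin_lock_irq(&demo_outer_lock);
            raw_spin_lock(&demo_inner_lock);        /* nests inside the outer lock */
            demo_runtime = runtime;
            raw_spin_unlock(&demo_inner_lock);
            raw_spin_unlock_irq(&demo_outer_lock);
    }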
@@ -10297,15 +10442,15 @@ static int sched_rt_global_constraints(void) | |||
10297 | if (sysctl_sched_rt_runtime == 0) | 10442 | if (sysctl_sched_rt_runtime == 0) |
10298 | return -EBUSY; | 10443 | return -EBUSY; |
10299 | 10444 | ||
10300 | spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); | 10445 | raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
10301 | for_each_possible_cpu(i) { | 10446 | for_each_possible_cpu(i) { |
10302 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; | 10447 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
10303 | 10448 | ||
10304 | spin_lock(&rt_rq->rt_runtime_lock); | 10449 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
10305 | rt_rq->rt_runtime = global_rt_runtime(); | 10450 | rt_rq->rt_runtime = global_rt_runtime(); |
10306 | spin_unlock(&rt_rq->rt_runtime_lock); | 10451 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
10307 | } | 10452 | } |
10308 | spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); | 10453 | raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); |
10309 | 10454 | ||
10310 | return 0; | 10455 | return 0; |
10311 | } | 10456 | } |
@@ -10596,9 +10741,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | |||
10596 | /* | 10741 | /* |
10597 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. | 10742 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. |
10598 | */ | 10743 | */ |
10599 | spin_lock_irq(&cpu_rq(cpu)->lock); | 10744 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
10600 | data = *cpuusage; | 10745 | data = *cpuusage; |
10601 | spin_unlock_irq(&cpu_rq(cpu)->lock); | 10746 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
10602 | #else | 10747 | #else |
10603 | data = *cpuusage; | 10748 | data = *cpuusage; |
10604 | #endif | 10749 | #endif |
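The rq->lock section here (and in the write path below) exists because a u64 access is not atomic on 32-bit machines: it is two 32-bit loads or stores, so an unlocked reader could see the halves of two different values. A hedged, self-contained sketch of the same guard; the lock and counter names are illustrative stand-ins for rq->lock and the per-cpu usage counter:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_RAW_SPINLOCK(demo_usage_lock);    /* stands in for rq->lock */
    static u64 demo_usage;

    static u64 demo_usage_read(void)
    {
            u64 val;

    #ifndef CONFIG_64BIT
            raw_spin_lock_irq(&demo_usage_lock);
            val = demo_usage;                       /* two loads on 32-bit */
            raw_spin_unlock_irq(&demo_usage_lock);
    #else
            val = demo_usage;                       /* single atomic load */
    #endif
            return val;
    }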
@@ -10614,9 +10759,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) | |||
10614 | /* | 10759 | /* |
10615 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. | 10760 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. |
10616 | */ | 10761 | */ |
10617 | spin_lock_irq(&cpu_rq(cpu)->lock); | 10762 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
10618 | *cpuusage = val; | 10763 | *cpuusage = val; |
10619 | spin_unlock_irq(&cpu_rq(cpu)->lock); | 10764 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
10620 | #else | 10765 | #else |
10621 | *cpuusage = val; | 10766 | *cpuusage = val; |
10622 | #endif | 10767 | #endif |
@@ -10850,9 +10995,9 @@ void synchronize_sched_expedited(void) | |||
10850 | init_completion(&req->done); | 10995 | init_completion(&req->done); |
10851 | req->task = NULL; | 10996 | req->task = NULL; |
10852 | req->dest_cpu = RCU_MIGRATION_NEED_QS; | 10997 | req->dest_cpu = RCU_MIGRATION_NEED_QS; |
10853 | spin_lock_irqsave(&rq->lock, flags); | 10998 | raw_spin_lock_irqsave(&rq->lock, flags); |
10854 | list_add(&req->list, &rq->migration_queue); | 10999 | list_add(&req->list, &rq->migration_queue); |
10855 | spin_unlock_irqrestore(&rq->lock, flags); | 11000 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
10856 | wake_up_process(rq->migration_thread); | 11001 | wake_up_process(rq->migration_thread); |
10857 | } | 11002 | } |
10858 | for_each_online_cpu(cpu) { | 11003 | for_each_online_cpu(cpu) { |
@@ -10860,13 +11005,14 @@ void synchronize_sched_expedited(void) | |||
10860 | req = &per_cpu(rcu_migration_req, cpu); | 11005 | req = &per_cpu(rcu_migration_req, cpu); |
10861 | rq = cpu_rq(cpu); | 11006 | rq = cpu_rq(cpu); |
10862 | wait_for_completion(&req->done); | 11007 | wait_for_completion(&req->done); |
10863 | spin_lock_irqsave(&rq->lock, flags); | 11008 | raw_spin_lock_irqsave(&rq->lock, flags); |
10864 | if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) | 11009 | if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) |
10865 | need_full_sync = 1; | 11010 | need_full_sync = 1; |
10866 | req->dest_cpu = RCU_MIGRATION_IDLE; | 11011 | req->dest_cpu = RCU_MIGRATION_IDLE; |
10867 | spin_unlock_irqrestore(&rq->lock, flags); | 11012 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
10868 | } | 11013 | } |
10869 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; | 11014 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; |
11015 | synchronize_sched_expedited_count++; | ||
10870 | mutex_unlock(&rcu_sched_expedited_mutex); | 11016 | mutex_unlock(&rcu_sched_expedited_mutex); |
10871 | put_online_cpus(); | 11017 | put_online_cpus(); |
10872 | if (need_full_sync) | 11018 | if (need_full_sync) |
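These last two hunks convert the expedited-grace-period path to the raw rq->lock and add a pass counter. The underlying handshake is unchanged: a request is queued under rq->lock, the per-cpu migration thread is woken, and the caller sleeps until that thread completes the request. A condensed, illustrative sketch of that pattern; everything except the locking, list, and completion primitives is hypothetical:

    #include <linux/completion.h>
    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    struct demo_req {
            struct list_head        list;
            struct completion       done;
    };

    static void demo_queue_and_wait(raw_spinlock_t *lock,
                                    struct list_head *queue,
                                    struct task_struct *worker,
                                    struct demo_req *req)
    {
            unsigned long flags;

            init_completion(&req->done);

            raw_spin_lock_irqsave(lock, flags);
            list_add(&req->list, queue);            /* hand request to worker */
            raw_spin_unlock_irqrestore(lock, flags);

            wake_up_process(worker);                /* kick the per-cpu thread */
            wait_for_completion(&req->done);        /* worker signals req->done */
    }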