author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-25 13:03:56 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-25 13:03:56 -0400
commit		593d1006cdf710ab3469c0c37c184fea0bc3da97 (patch)
tree		e4db58440018a52089e8d6b39160f753ab10df99 /kernel/sched
parent		5217192b85480353aeeb395574e60d0db04f3676 (diff)
parent		9b20aa63b8fc9a6a3b6831f4eae3621755e51211 (diff)
Merge remote-tracking branch 'tip/core/rcu' into next.2012.09.25b
Resolved conflict in kernel/sched/core.c using Peter Zijlstra's
approach from https://lkml.org/lkml/2012/9/5/585.
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	52
-rw-r--r--	kernel/sched/fair.c	37
-rw-r--r--	kernel/sched/rt.c	1
-rw-r--r--	kernel/sched/sched.h	1
4 files changed, 29 insertions(+), 62 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8c38b5e7ce47..1a48cdbc8631 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5342,9 +5342,6 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	rq->stop = NULL;
 
-	/* Ensure any throttled groups are reachable by pick_next_task */
-	unthrottle_offline_cfs_rqs(rq);
-
 	for ( ; ; ) {
 		/*
 		 * There's this thread running, bail when that's the only
@@ -5610,15 +5607,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_DEAD:
-		{
-			struct rq *dest_rq;
-
-			local_irq_save(flags);
-			dest_rq = cpu_rq(smp_processor_id());
-			raw_spin_lock(&dest_rq->lock);
-			calc_load_migrate(rq);
-			raw_spin_unlock_irqrestore(&dest_rq->lock, flags);
-		}
+		calc_load_migrate(rq);
 		break;
 #endif
 	}
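Note on the resolution above: the merged CPU_DEAD path drops the destination-runqueue locking entirely and calls calc_load_migrate(rq) directly; the dead CPU's runqueue is no longer live, so no remote lock is needed to fold its load contribution. For context, a sketch of what that helper does follows; the body is an assumption reconstructed from the scheduler code of that period, not part of this diff:

    /*
     * Context sketch (assumed era implementation from kernel/sched/core.c,
     * not shown in this diff): fold the dead CPU's remaining contribution
     * to the load average into the global calc_load_tasks count.
     */
    static void calc_load_migrate(struct rq *rq)
    {
    	long delta = calc_load_fold_active(rq);

    	if (delta)
    		atomic_long_add(delta, &calc_load_tasks);
    }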
@@ -6027,11 +6016,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
  * allows us to avoid some pointer chasing select_idle_sibling().
  *
- * Iterate domains and sched_groups downward, assigning CPUs to be
- * select_idle_sibling() hw buddy.  Cross-wiring hw makes bouncing
- * due to random perturbation self canceling, ie sw buddies pull
- * their counterpart to their CPU's hw counterpart.
- *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
  * two cpus are in the same cache domain, see cpus_share_cache().
@@ -6045,40 +6029,8 @@ static void update_top_cache_domain(int cpu)
 	int id = cpu;
 
 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-	if (sd) {
-		struct sched_domain *tmp = sd;
-		struct sched_group *sg, *prev;
-		bool right;
-
-		/*
-		 * Traverse to first CPU in group, and count hops
-		 * to cpu from there, switching direction on each
-		 * hop, never ever pointing the last CPU rightward.
-		 */
-		do {
-			id = cpumask_first(sched_domain_span(tmp));
-			prev = sg = tmp->groups;
-			right = 1;
-
-			while (cpumask_first(sched_group_cpus(sg)) != id)
-				sg = sg->next;
-
-			while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-				prev = sg;
-				sg = sg->next;
-				right = !right;
-			}
-
-			/* A CPU went down, never point back to domain start. */
-			if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-				right = false;
-
-			sg = right ? sg->next : prev;
-			tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-		} while ((tmp = tmp->child));
-
+	if (sd)
 		id = cpumask_first(sched_domain_span(sd));
-	}
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_id, cpu) = id;
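Taken together, these two core.c hunks revert the "CPU buddies" machinery. Assembled from the '+' side of the hunk above (a reconstruction, not copied from the tree), the whole function collapses to:

    /* Result after this hunk, pieced together from the '+' side above. */
    static void update_top_cache_domain(int cpu)
    {
    	struct sched_domain *sd;
    	int id = cpu;

    	/* Highest domain sharing a last-level cache with this CPU. */
    	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
    	if (sd)
    		id = cpumask_first(sched_domain_span(sd));

    	/* Publish the LLC domain and its id for cpus_share_cache(). */
    	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
    	per_cpu(sd_llc_id, cpu) = id;
    }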
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c219bf8d704c..96e2b18b6283 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2052,7 +2052,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
 
@@ -2106,7 +2106,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 	return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
 
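With the real unthrottle_offline_cfs_rqs() now static and its extern declaration gone from sched.h (last hunk below), the !CONFIG_CFS_BANDWIDTH stub has to become static inline as well; otherwise builds with bandwidth control compiled out would be left with a global definition and no prototype. The resulting stub pair, from the '+' side:

    /* Stubs when CFS bandwidth control is compiled out ('+' side above). */
    static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
    static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}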
@@ -2637,6 +2637,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
+	struct sched_group *sg;
+	int i;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2653,17 +2655,29 @@ static int select_idle_sibling(struct task_struct *p, int target)
 		return prev_cpu;
 
 	/*
-	 * Otherwise, check assigned siblings to find an elegible idle cpu.
+	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
 	sd = rcu_dereference(per_cpu(sd_llc, target));
-
 	for_each_lower_domain(sd) {
-		if (!cpumask_test_cpu(sd->idle_buddy, tsk_cpus_allowed(p)))
-			continue;
-		if (idle_cpu(sd->idle_buddy))
-			return sd->idle_buddy;
-	}
-
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+						tsk_cpus_allowed(p)))
+				goto next;
 
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
+	}
+done:
 	return target;
 }
 
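The interleaved hunk above is hard to read as code. Assembled from its '+' side (a reconstruction; sd, sg and i are declared in the earlier hunk), the restored scan looks like this. The policy it brings back: descend from the LLC domain and take the first sched_group that both intersects the task's allowed CPUs and is entirely idle, so a waking task lands on a fully idle core rather than on a single buddy CPU that may be busy.

    	/* Restored group scan, assembled from the '+' side above. */
    	sd = rcu_dereference(per_cpu(sd_llc, target));
    	for_each_lower_domain(sd) {
    		sg = sd->groups;
    		do {
    			if (!cpumask_intersects(sched_group_cpus(sg),
    						tsk_cpus_allowed(p)))
    				goto next;

    			/* Reject the group if any CPU in it is busy. */
    			for_each_cpu(i, sched_group_cpus(sg)) {
    				if (!idle_cpu(i))
    					goto next;
    			}

    			/* Entirely idle group: take its first allowed CPU. */
    			target = cpumask_first_and(sched_group_cpus(sg),
    					tsk_cpus_allowed(p));
    			goto done;
    next:
    			sg = sg->next;
    		} while (sg != sd->groups);
    	}
    done:
    	return target;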
@@ -3658,7 +3672,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  * @group: sched_group whose statistics are to be updated.
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
@@ -3805,7 +3818,6 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
@@ -4956,6 +4968,9 @@ static void rq_online_fair(struct rq *rq)
 static void rq_offline_fair(struct rq *rq)
 {
 	update_sysctl();
+
+	/* Ensure any throttled groups are reachable by pick_next_task */
+	unthrottle_offline_cfs_rqs(rq);
 }
 
 #endif /* CONFIG_SMP */
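This is the other half of the first core.c hunk: the unthrottle call removed from migrate_tasks() moves into the fair class's own offline callback, keeping the CFS-specific cleanup inside fair.c. The resulting function, reconstructed from the '+' side:

    /* rq_offline_fair() after this hunk, from the '+' side above. */
    static void rq_offline_fair(struct rq *rq)
    {
    	update_sysctl();

    	/* Ensure any throttled groups are reachable by pick_next_task */
    	unthrottle_offline_cfs_rqs(rq);
    }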
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 944cb68420e9..e0b7ba9c040f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -691,6 +691,7 @@ balanced:
 	 * runtime - in which case borrowing doesn't make sense.
 	 */
 	rt_rq->rt_runtime = RUNTIME_INF;
+	rt_rq->rt_throttled = 0;
 	raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
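The one-line rt.c change clears the throttle flag at the point where the runqueue's runtime becomes infinite: with RUNTIME_INF the rt_rq can never legitimately be throttled again, so a stale rt_throttled left set here would appear to keep the runqueue throttled indefinitely. The resulting sequence, from the '+' side:

    	/*
    	 * From the '+' side above: once runtime is infinite, a leftover
    	 * throttle would never be lifted, so clear it under the locks.
    	 */
    	rt_rq->rt_runtime = RUNTIME_INF;
    	rt_rq->rt_throttled = 0;
    	raw_spin_unlock(&rt_rq->rt_runtime_lock);
    	raw_spin_unlock(&rt_b->rt_runtime_lock);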
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f6714d009e77..0848fa36c383 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1144,7 +1144,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
-extern void unthrottle_offline_cfs_rqs(struct rq *rq);
 
 extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
 