Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 67 ++++++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 38 insertions(+), 29 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 73513f4e19df..6cc1fd5d5072 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -231,13 +231,20 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	for (;;) {
+		unsigned long delta;
+		ktime_t soft, hard;
+
 		if (hrtimer_active(&rt_b->rt_period_timer))
 			break;
 
 		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
 		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-		hrtimer_start_expires(&rt_b->rt_period_timer,
-				HRTIMER_MODE_ABS);
+
+		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
+		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
+		delta = ktime_to_ns(ktime_sub(hard, soft));
+		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
+				HRTIMER_MODE_ABS, 0);
 	}
 	spin_unlock(&rt_b->rt_runtime_lock);
 }
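The rewrite above re-arms the period timer without collapsing its soft/hard expiry range the way a plain hrtimer_start_expires() restart would. A minimal sketch of the same pattern as a standalone helper (the helper name is ours, not the kernel's; the trailing 0 asks __hrtimer_start_range_ns() not to perform a wakeup, since the callers here hold scheduler locks):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/*
 * Sketch: restart an hrtimer keeping its range intact -- the soft
 * expiry stays the target, the soft..hard gap stays the allowed slack.
 */
static void restart_keeping_slack(struct hrtimer *timer)
{
	ktime_t soft = hrtimer_get_softexpires(timer);
	ktime_t hard = hrtimer_get_expires(timer);
	unsigned long delta = ktime_to_ns(ktime_sub(hard, soft));

	__hrtimer_start_range_ns(timer, soft, delta,
			HRTIMER_MODE_ABS, 0 /* no wakeup */);
}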
@@ -1110,7 +1117,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
 	if (rq == this_rq()) {
 		hrtimer_restart(timer);
 	} else if (!rq->hrtick_csd_pending) {
-		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
+		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
 		rq->hrtick_csd_pending = 1;
 	}
 }
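The new third argument to __smp_call_function_single() is a wait flag: non-zero would spin until the remote CPU has run the callback, 0 returns immediately. A hedged sketch of the asynchronous contract being relied on here (rq, cpu_of() and hrtick_csd as in the hunk above):

/*
 * Fire-and-forget cross-CPU call: it returns before the remote CPU
 * runs the callback, so &rq->hrtick_csd must stay valid until then --
 * which is what the hrtick_csd_pending flag guards against re-queueing.
 */
static void hrtick_kick(struct rq *rq)
{
	__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd,
				   0 /* don't wait */);
}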
@@ -1146,7 +1153,8 @@ static __init void init_hrtick(void)
  */
 static void hrtick_start(struct rq *rq, u64 delay)
 {
-	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
+	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
+			HRTIMER_MODE_REL, 0);
 }
 
 static inline void init_hrtick(void)
@@ -3818,19 +3826,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
  */
 #define MAX_PINNED_INTERVAL	512
 
+/* Working cpumask for load_balance and load_balance_newidle. */
+static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *balance, struct cpumask *cpus)
+			int *balance)
 {
 	int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
 	unsigned long flags;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
 	cpumask_setall(cpus);
 
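load_balance() (and load_balance_newidle() below) now borrow a preallocated per-CPU mask instead of doing an alloc_cpumask_var(GFP_ATOMIC) on every balancing attempt; this is safe because both run either from the scheduler softirq or under the run-queue lock, so they cannot migrate CPUs while the mask is in use. The pattern in isolation, with illustrative names:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/*
 * One scratch mask per CPU: with CONFIG_CPUMASK_OFFSTACK this is a
 * pointer filled in at boot, otherwise the storage sits directly in
 * the per-CPU area.
 */
static DEFINE_PER_CPU(cpumask_var_t, scratch_mask);

static void with_scratch_mask(void)
{
	/* Only valid while the caller cannot migrate to another CPU. */
	struct cpumask *mask = __get_cpu_var(scratch_mask);

	cpumask_setall(mask);
	/* ... consume mask ... */
}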
@@ -3985,8 +3997,7 @@ out:
  * this_rq is locked.
  */
 static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
-			struct cpumask *cpus)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 {
 	struct sched_group *group;
 	struct rq *busiest = NULL;
@@ -3994,6 +4005,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 	int ld_moved = 0;
 	int sd_idle = 0;
 	int all_pinned = 0;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
 	cpumask_setall(cpus);
 
@@ -4134,10 +4146,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
-	cpumask_var_t tmpmask;
-
-	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
-		return;
 
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -4148,7 +4156,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		if (sd->flags & SD_BALANCE_NEWIDLE)
 			/* If we've pulled tasks over stop searching: */
 			pulled_task = load_balance_newidle(this_cpu, this_rq,
-							   sd, tmpmask);
+							   sd);
 
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
@@ -4163,7 +4171,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		 */
 		this_rq->next_balance = next_balance;
 	}
-	free_cpumask_var(tmpmask);
 }
 
 /*
@@ -4313,11 +4320,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 	int need_serialize;
-	cpumask_var_t tmp;
-
-	/* Fails alloc? Rebalancing probably not a priority right now. */
-	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
-		return;
 
 	for_each_domain(cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -4342,7 +4344,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		}
 
 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
-			if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
+			if (load_balance(cpu, rq, sd, idle, &balance)) {
 				/*
 				 * We've pulled tasks over so either we're no
 				 * longer idle, or one of our SMT siblings is
@@ -4376,8 +4378,6 @@ out:
 	 */
 	if (likely(update_next_balance))
 		rq->next_balance = next_balance;
-
-	free_cpumask_var(tmp);
 }
 
 /*
@@ -4781,10 +4781,7 @@ void scheduler_tick(void)
 #endif
 }
 
-#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
-				defined(CONFIG_PREEMPT_TRACER))
-
-static inline unsigned long get_parent_ip(unsigned long addr)
+unsigned long get_parent_ip(unsigned long addr)
 {
 	if (in_lock_functions(addr)) {
 		addr = CALLER_ADDR2;
@@ -4794,6 +4791,9 @@ static inline unsigned long get_parent_ip(unsigned long addr)
 	return addr;
 }
 
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+				defined(CONFIG_PREEMPT_TRACER))
+
 void __kprobes add_preempt_count(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
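These two hunks un-inline get_parent_ip() and hoist it out of the preempt-tracer #if, so it is built unconditionally with external linkage and becomes callable from other files (the matching header declaration lives outside kernel/sched.c and is not visible in this diff). Its job is to skip past lock functions when reporting a call site; a hypothetical caller:

#include <linux/ftrace.h>	/* CALLER_ADDR1 */
#include <linux/kernel.h>

static void report_call_site(void)
{
	/* If our immediate caller is a lock function, walk further up. */
	unsigned long ip = get_parent_ip(CALLER_ADDR1);

	printk(KERN_DEBUG "reached via %pS\n", (void *)ip);
}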
@@ -7728,7 +7728,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 {
 	int group;
 
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group).sg;
@@ -7757,7 +7757,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #else
 	group = cpu;
@@ -8100,7 +8100,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
 		cpumask_and(sched_domain_span(sd),
-				&per_cpu(cpu_sibling_map, i), cpu_map);
+				topology_thread_cpumask(i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -8111,7 +8111,7 @@
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
 		cpumask_and(this_sibling_map,
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		if (i != cpumask_first(this_sibling_map))
 			continue;
 
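All four sibling-map hunks replace direct access to the x86 per-CPU cpu_sibling_map with topology_thread_cpumask(), the arch-neutral topology accessor for the hardware threads that share a core (on x86 it still resolves to cpu_sibling_map underneath). For illustration:

#include <linux/topology.h>
#include <linux/types.h>

/* True if 'other' is an SMT sibling of 'cpu', i.e. shares its core. */
static bool shares_core(int cpu, int other)
{
	return cpumask_test_cpu(other, topology_thread_cpumask(cpu));
}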
@@ -8787,6 +8787,9 @@ void __init sched_init(void)
 #ifdef CONFIG_USER_SCHED
 	alloc_size *= 2;
 #endif
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	alloc_size += num_possible_cpus() * cpumask_size();
+#endif
 	/*
 	 * As sched_init() is called before page_alloc is setup,
 	 * we use alloc_bootmem().
@@ -8824,6 +8827,12 @@ void __init sched_init(void)
 		ptr += nr_cpu_ids * sizeof(void **);
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_RT_GROUP_SCHED */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	for_each_possible_cpu(i) {
+		per_cpu(load_balance_tmpmask, i) = (void *)ptr;
+		ptr += cpumask_size();
+	}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 	}
 
 #ifdef CONFIG_SMP
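The two sched_init() hunks fund the per-CPU masks declared earlier: with CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a bare struct cpumask *, so the boot-time allocation is grown by num_possible_cpus() * cpumask_size() bytes and each CPU's load_balance_tmpmask is pointed at its slice. Condensed into one hypothetical helper (in sched_init() itself the block is shared with the group-scheduling arrays, hence the running ptr):

#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

static void __init carve_tmpmasks(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	int i;
	unsigned long ptr = (unsigned long)alloc_bootmem(
				num_possible_cpus() * cpumask_size());

	/* Hand each possible CPU its cpumask_size() slice of the block. */
	for_each_possible_cpu(i) {
		per_cpu(load_balance_tmpmask, i) = (void *)ptr;
		ptr += cpumask_size();
	}
#endif
}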