Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	31
1 files changed, 23 insertions, 8 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 52bbf1c842a8..e72485033c48 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2266,6 +2266,16 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
+	if (!sync) {
+		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
+		    p->se.avg_overlap < sysctl_sched_migration_cost)
+			sync = 1;
+	} else {
+		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
+		    p->se.avg_overlap >= sysctl_sched_migration_cost)
+			sync = 0;
+	}
+
 #ifdef CONFIG_SMP
 	if (sched_feat(LB_WAKEUP_UPDATE)) {
 		struct sched_domain *sd;
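
Note on the hunk above: the two branches are mirror images of one test, so the incoming sync hint cancels out and the flag always ends up meaning "both tasks' avg_overlap is below the migration cost". A standalone distillation of that predicate, with a hypothetical helper name and plain C types (a sketch, not code from the patch):

	/*
	 * Whichever branch of the hunk runs, sync is left set exactly
	 * when both the waker's and the wakee's average overlap are
	 * below the migration-cost threshold.
	 */
	static int overlap_says_sync(unsigned long long waker_overlap,
				     unsigned long long wakee_overlap,
				     unsigned long long migration_cost)
	{
		return waker_overlap < migration_cost &&
		       wakee_overlap < migration_cost;
	}
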
@@ -3880,19 +3890,24 @@ int select_nohz_load_balancer(int stop_tick)
 	int cpu = smp_processor_id();
 
 	if (stop_tick) {
-		cpumask_set_cpu(cpu, nohz.cpu_mask);
 		cpu_rq(cpu)->in_nohz_recently = 1;
 
-		/*
-		 * If we are going offline and still the leader, give up!
-		 */
-		if (!cpu_active(cpu) &&
-		    atomic_read(&nohz.load_balancer) == cpu) {
+		if (!cpu_active(cpu)) {
+			if (atomic_read(&nohz.load_balancer) != cpu)
+				return 0;
+
+			/*
+			 * If we are going offline and still the leader,
+			 * give up!
+			 */
 			if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
 				BUG();
+
 			return 0;
 		}
 
+		cpumask_set_cpu(cpu, nohz.cpu_mask);
+
 		/* time for ilb owner also to sleep */
 		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
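
Note on the hunk above: the point of the reorder is that cpumask_set_cpu() now runs after the cpu_active() early return, so a CPU on its way offline resigns ilb leadership and bails out before it is ever added to nohz.cpu_mask. A self-contained sketch of that control-flow shape, with hypothetical stand-ins for the kernel's nohz state (not the kernel code):

	#include <stdio.h>

	static int load_balancer = -1;	/* stand-in for nohz.load_balancer */
	static unsigned int nohz_mask;	/* one bit per CPU, like nohz.cpu_mask */

	/* Offline check first; the mask bit is only set for CPUs staying online. */
	static int nohz_stop_tick_sketch(int cpu, int cpu_active)
	{
		if (!cpu_active) {
			if (load_balancer == cpu)
				load_balancer = -1;	/* give up leadership */
			return 0;			/* mask bit never set */
		}

		nohz_mask |= 1U << cpu;
		return 1;
	}

	int main(void)
	{
		nohz_stop_tick_sketch(2, 0);	/* offline CPU: mask stays clear */
		printf("mask after offline cpu2: %#x\n", nohz_mask);
		nohz_stop_tick_sketch(3, 1);	/* active CPU: bit gets set */
		printf("mask after active cpu3:  %#x\n", nohz_mask);
		return 0;
	}
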
@@ -4687,8 +4702,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, int sync, void *key)
 {
 	wait_queue_t *curr, *next;
 
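
Note on the last hunk: it only drops the static qualifier, giving __wake_up_common external linkage so code outside kernel/sched.c can call it. Any such caller needs a matching prototype; where that declaration actually lives is outside this diff, so the following is an assumption-labeled sketch of its shape only:

	/* Hypothetical declaration for the now-external function; the
	 * real header location is not shown in this diff. */
	extern void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
				     int nr_exclusive, int sync, void *key);
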
