Diffstat (limited to 'kernel')
 kernel/futex.c      |  2 +-
 kernel/perf_event.c | 12 ++++++++++++
 kernel/sched_fair.c | 14 ++++++--------
3 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index dfb924ffe65b..fe28dc282eae 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1886,7 +1886,7 @@ retry:
 	restart->futex.val = val;
 	restart->futex.time = abs_time->tv64;
 	restart->futex.bitset = bitset;
-	restart->futex.flags = flags;
+	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
 	ret = -ERESTART_RESTARTBLOCK;
 
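The futex.c change ORs FLAGS_HAS_TIMEOUT into the saved restart state: the restart path only honors restart->futex.time when that flag is set, so without it a wait restarted after a signal would block as though no timeout had been requested. Below is a standalone user-space sketch of the pattern; the flag values mirror the kernel's futex flags, but the program and its names are purely illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative flag values, mirroring the kernel's futex restart flags. */
    #define FLAGS_SHARED      0x01
    #define FLAGS_HAS_TIMEOUT 0x04

    struct futex_restart {
            uint32_t val;
            uint64_t time;      /* absolute timeout, valid only if flagged */
            uint32_t bitset;
            unsigned int flags;
    };

    /* On restart, only trust .time when the save path recorded a timeout. */
    static void restart_wait(const struct futex_restart *r)
    {
            if (r->flags & FLAGS_HAS_TIMEOUT)
                    printf("restart with absolute timeout %llu ns\n",
                           (unsigned long long)r->time);
            else
                    printf("restart with no timeout\n");
    }

    int main(void)
    {
            struct futex_restart r = { .val = 1, .time = 123456789, .bitset = ~0u };

            r.flags = FLAGS_SHARED;                      /* before the fix */
            restart_wait(&r);

            r.flags = FLAGS_SHARED | FLAGS_HAS_TIMEOUT;  /* after the fix */
            restart_wait(&r);
            return 0;
    }

Run, the first call reports no timeout and the second reports the saved one, which is exactly the behavioral difference the one-line fix makes.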
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 27960f114efd..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	}
 
 	if (mode & PERF_CGROUP_SWIN) {
+		WARN_ON_ONCE(cpuctx->cgrp);
 		/* set cgrp before ctxsw in to
 		 * allow event_filter_match() to not
 		 * have to pass task around
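The new WARN_ON_ONCE(cpuctx->cgrp) documents an invariant: by the time a cgroup is switched in, the previous sched-out must already have cleared cpuctx->cgrp. A minimal user-space imitation of the warn-once idiom follows; the macro is a stand-in for the kernel's, not its actual implementation, and relies on GCC statement expressions:

    #include <stdio.h>

    /* A user-space stand-in for the kernel's WARN_ON_ONCE(): report the
     * first violation of an invariant, then stay silent so logs are not
     * flooded by a repeating condition. */
    #define WARN_ON_ONCE(cond) ({                               \
            static int warned;                                  \
            int _c = !!(cond);                                  \
            if (_c && !warned) {                                \
                    warned = 1;                                 \
                    fprintf(stderr, "WARN: %s at %s:%d\n",      \
                            #cond, __FILE__, __LINE__);         \
            }                                                   \
            _c;                                                 \
    })

    struct cpu_context { void *cgrp; };

    static void cgroup_switch_in(struct cpu_context *cpuctx, void *new_cgrp)
    {
            /* Invariant: the previous cgroup must have been switched out. */
            WARN_ON_ONCE(cpuctx->cgrp);
            cpuctx->cgrp = new_cgrp;
    }

    int main(void)
    {
            struct cpu_context ctx = { 0 };
            int dummy;

            cgroup_switch_in(&ctx, &dummy);  /* fine: cgrp was NULL */
            cgroup_switch_in(&ctx, &dummy);  /* warns once: cgrp still set */
            cgroup_switch_in(&ctx, &dummy);  /* silent: already warned */
            return 0;
    }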
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
+	/*
+	 * We must ctxsw out cgroup events to avoid conflict
+	 * when invoking perf_task_event_sched_in() later on
+	 * in this function. Otherwise we end up trying to
+	 * ctxswin cgroup events which are already scheduled
+	 * in.
+	 */
+	perf_cgroup_sched_out(current);
 	task_ctx_sched_out(ctx, EVENT_ALL);
 
 	raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
 	raw_spin_unlock(&ctx->lock);
 
+	/*
+	 * Also calls ctxswin for cgroup events, if any:
+	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
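Taken together, the two perf_event_enable_on_exec() hunks enforce an ordering: cgroup events are switched out along with the task context before anything is switched back in, because perf_event_context_sched_in() at the bottom also schedules cgroup events and would otherwise try to switch in events that never left. A toy model of that invariant, using illustrative names rather than the kernel's:

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy model of the ordering fixed here: an event must be fully
     * scheduled out before being scheduled back in. */
    struct event { const char *name; bool scheduled_in; };

    static void sched_out(struct event *e)
    {
            if (!e->scheduled_in)
                    fprintf(stderr, "bug: %s scheduled out twice\n", e->name);
            e->scheduled_in = false;
    }

    static void sched_in(struct event *e)
    {
            if (e->scheduled_in) {
                    fprintf(stderr, "bug: %s scheduled in while already in\n",
                            e->name);
                    return;
            }
            e->scheduled_in = true;
    }

    int main(void)
    {
            struct event task_ev = { "task event",   true };
            struct event cgrp_ev = { "cgroup event", true };

            /* The fix: switch out cgroup events too, so the later
             * sched-in (which covers both kinds) starts clean. */
            sched_out(&cgrp_ev);    /* perf_cgroup_sched_out(current) */
            sched_out(&task_ev);    /* task_ctx_sched_out(ctx, EVENT_ALL) */

            /* ... enable events under ctx->lock ... */

            sched_in(&task_ev);     /* perf_event_context_sched_in(...) */
            sched_in(&cgrp_ev);     /* also covers cgroup events */

            puts("no double sched-in");
            return 0;
    }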
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7f00772e57c9..6fa833ab2cb8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      enum cpu_idle_type idle, int *all_pinned,
 	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-	int loops = 0, pulled = 0, pinned = 0;
+	int loops = 0, pulled = 0;
 	long rem_load_move = max_load_move;
 	struct task_struct *p, *n;
 
 	if (max_load_move == 0)
 		goto out;
 
-	pinned = 1;
-
 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
 		if (loops++ > sysctl_sched_nr_migrate)
 			break;
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
-		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
+				      all_pinned))
 			continue;
 
 		pull_task(busiest, p, this_rq, this_cpu);
@@ -2153,9 +2152,6 @@ out:
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
-
 	return max_load_move - rem_load_move;
 }
 
@@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
+	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assumes all things are equal, which typically
@@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * Don't pull any tasks if this group is already above the domain
 	 * average load.
 	 */
-	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
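The two find_busiest_group() hunks hoist the sds.avg_load computation above the early bail-out checks, so any check between the old and the new location can rely on it; the formula itself is unchanged. For intuition, a worked example of the fixed-point computation, with made-up inputs rather than values from the patch:

    #include <stdio.h>

    #define SCHED_LOAD_SCALE 1024UL  /* 2^10, the fixed-point unit weight */

    int main(void)
    {
            /* Illustrative numbers only: total load across the domain
             * and total cpu_power of its groups. */
            unsigned long total_load = 3072;  /* e.g. three nice-0 tasks */
            unsigned long total_pwr  = 2048;  /* e.g. two full-power cpus */

            /* The hoisted computation: domain-average load per unit
             * of cpu_power, scaled by SCHED_LOAD_SCALE. */
            unsigned long avg_load =
                    (SCHED_LOAD_SCALE * total_load) / total_pwr;

            printf("avg_load = %lu\n", avg_load);  /* prints 1536 */
            return 0;
    }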
@@ -3340,6 +3337,7 @@ redo:
 	 * still unbalanced. ld_moved simply stays zero, so it is
 	 * correctly treated as an imbalance.
 	 */
+	all_pinned = 1;
 	local_irq_save(flags);
 	double_rq_lock(this_rq, busiest);
 	ld_moved = move_tasks(this_rq, this_cpu, busiest,
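The all_pinned rework threads the caller's flag straight through to can_migrate_task(): load_balance() now seeds all_pinned = 1 once per balance attempt (the redo hunk above), and any pass that finds a migratable task clears it. The old scheme copied a per-call local back on every return from balance_tasks(), so a later pass that moved nothing could overwrite what an earlier pass had learned. A toy model of the difference, with illustrative names:

    #include <stdio.h>

    /* Finding one migratable task means the load is not all pinned, so
     * the pass may only clear the caller-owned flag, never re-set it. */
    static int try_migrate(int migratable, int *all_pinned)
    {
            if (migratable)
                    *all_pinned = 0;  /* as can_migrate_task() now does */
            return migratable;
    }

    int main(void)
    {
            int all_pinned = 1;  /* seeded once per attempt by the caller */
            int groups[] = { 0, 1, 0 };  /* only the middle group is movable */
            int moved = 0;

            for (unsigned i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
                    moved += try_migrate(groups[i], &all_pinned);

            /* Under the old copy-back scheme the final pass (nothing
             * movable) would have reported all_pinned = 1 despite the
             * successful migration in the middle pass. */
            printf("moved=%d all_pinned=%d\n", moved, all_pinned);
            return 0;
    }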
