Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c           2
-rw-r--r--  kernel/perf_event.c     12
-rw-r--r--  kernel/power/Kconfig     6
-rw-r--r--  kernel/sched.c          20
-rw-r--r--  kernel/sched_fair.c     14
-rw-r--r--  kernel/trace/blktrace.c 23
6 files changed, 37 insertions, 40 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index dfb924ffe65b..fe28dc282eae 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1886,7 +1886,7 @@ retry:
 	restart->futex.val = val;
 	restart->futex.time = abs_time->tv64;
 	restart->futex.bitset = bitset;
-	restart->futex.flags = flags;
+	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
 	ret = -ERESTART_RESTARTBLOCK;
 
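Note on the one-liner above: futex_wait() records the absolute timeout in the restart block, but the restart path only rebuilds that timeout when FLAGS_HAS_TIMEOUT is set. A minimal sketch of the restart-side consumer (paraphrased from the same kernel generation, not part of this diff) shows why the flag has to be recorded here:

/*
 * Sketch of the restart consumer: without FLAGS_HAS_TIMEOUT the stored
 * futex.time is ignored and a signalled, timed FUTEX_WAIT gets restarted
 * with an infinite wait.
 */
static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}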
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 27960f114efd..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	}
 
 	if (mode & PERF_CGROUP_SWIN) {
+		WARN_ON_ONCE(cpuctx->cgrp);
 		/* set cgrp before ctxsw in to
 		 * allow event_filter_match() to not
 		 * have to pass task around
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
+	/*
+	 * We must ctxsw out cgroup events to avoid conflict
+	 * when invoking perf_task_event_sched_in() later on
+	 * in this function. Otherwise we end up trying to
+	 * ctxswin cgroup events which are already scheduled
+	 * in.
+	 */
+	perf_cgroup_sched_out(current);
 	task_ctx_sched_out(ctx, EVENT_ALL);
 
 	raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
 	raw_spin_unlock(&ctx->lock);
 
+	/*
+	 * Also calls ctxswin for cgroup events, if any:
+	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
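Taken together, the two perf_event.c hunks enforce one invariant from both ends: cgroup events must be switched out before perf_event_enable_on_exec() re-schedules the context, and perf_cgroup_switch() now warns if a sched-in ever finds cpuctx->cgrp still set. A hedged sketch of the resulting ordering (the helper names are the ones visible in the hunks above; the simplified body is an assumption, not the full function):

/*
 * Sketch only: the essential ordering in perf_event_enable_on_exec()
 * after this patch. Cgroup events go out first, then the task context is
 * torn down and its events re-enabled, and the final sched-in brings both
 * task and cgroup events back, starting from cpuctx->cgrp == NULL so the
 * new WARN_ON_ONCE() stays quiet.
 */
static void enable_on_exec_ordering(struct perf_event_context *ctx)
{
	perf_cgroup_sched_out(current);			/* cgroup events out */
	task_ctx_sched_out(ctx, EVENT_ALL);		/* task context out  */

	/* ... enable the events under ctx->lock ... */

	perf_event_context_sched_in(ctx, ctx->task);	/* everything back in */
}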
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 4603f08dc47b..6de9a8fc3417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,9 +18,13 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HIBERNATE_CALLBACKS
+	bool
+
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
 	depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+	select HIBERNATE_CALLBACKS
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
 	---help---
@@ -85,7 +89,7 @@ config PM_STD_PARTITION
 
 config PM_SLEEP
 	def_bool y
-	depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
+	depends on SUSPEND || HIBERNATE_CALLBACKS
 
 config PM_SLEEP_SMP
 	def_bool y
diff --git a/kernel/sched.c b/kernel/sched.c
index 48013633d792..a187c3fe027b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4111,20 +4111,20 @@ need_resched:
 					try_to_wake_up_local(to_wakeup);
 			}
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+
+			/*
+			 * If we are going to sleep and we have plugged IO queued, make
+			 * sure to submit it to avoid deadlocks.
+			 */
+			if (blk_needs_flush_plug(prev)) {
+				raw_spin_unlock(&rq->lock);
+				blk_flush_plug(prev);
+				raw_spin_lock(&rq->lock);
+			}
 		}
 		switch_count = &prev->nvcsw;
 	}
 
-	/*
-	 * If we are going to sleep and we have plugged IO queued, make
-	 * sure to submit it to avoid deadlocks.
-	 */
-	if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
-		raw_spin_unlock(&rq->lock);
-		blk_flush_plug(prev);
-		raw_spin_lock(&rq->lock);
-	}
-
 	pre_schedule(rq, prev);
 
 	if (unlikely(!rq->nr_running))
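The sched.c hunk only moves the plug flush inside the branch that actually deactivates the task, so the later prev->state recheck becomes unnecessary and the flush no longer runs for tasks that stay runnable. For reference, the predicate being tested is roughly the following (a sketch under the assumption that blk_needs_flush_plug() simply reports a non-empty per-task plug list, as in kernels of this generation; it is not part of this diff):

/*
 * Roughly: does the task have plugged requests that would otherwise sit
 * unsubmitted while it sleeps, possibly waiting on that very IO?
 */
static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug && !list_empty(&plug->list);
}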
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7f00772e57c9..6fa833ab2cb8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      enum cpu_idle_type idle, int *all_pinned,
 	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-	int loops = 0, pulled = 0, pinned = 0;
+	int loops = 0, pulled = 0;
 	long rem_load_move = max_load_move;
 	struct task_struct *p, *n;
 
 	if (max_load_move == 0)
 		goto out;
 
-	pinned = 1;
-
 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
 		if (loops++ > sysctl_sched_nr_migrate)
 			break;
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
-		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
+				      all_pinned))
 			continue;
 
 		pull_task(busiest, p, this_rq, this_cpu);
@@ -2153,9 +2152,6 @@ out:
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
-
 	return max_load_move - rem_load_move;
 }
 
@@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
+	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assumes all things are equal, which typically
@@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * Don't pull any tasks if this group is already above the domain
 	 * average load.
 	 */
-	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
@@ -3340,6 +3337,7 @@ redo:
 		 * still unbalanced. ld_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
+		all_pinned = 1;
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
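The sched_fair.c changes all serve one contract: load_balance() seeds all_pinned to 1 (the new line in the redo: path above) and balance_tasks() now hands the caller's pointer straight to can_migrate_task(), which clears it as soon as any candidate task may run on the destination CPU. A hedged sketch of the clearing side (abridged to the affinity check only; assumed shape, not part of this diff):

/*
 * Sketch: *all_pinned starts at 1 in load_balance() and is cleared here
 * the first time a task is allowed on this_cpu, so a value of 1 after the
 * scan really means "every task was pinned", not "variable never written".
 */
static int can_migrate_task(struct task_struct *p, struct rq *rq,
			    int this_cpu, struct sched_domain *sd,
			    enum cpu_idle_type idle, int *all_pinned)
{
	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed))
		return 0;			/* pinned elsewhere */
	*all_pinned = 0;			/* at least one movable task */

	/* ... remaining checks: task currently running, cache hotness ... */
	return 1;
}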
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7aa40f8e182d..3e3970d53d14 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -850,32 +850,19 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
+				    unsigned int depth)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt) {
-		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-		__be64 rpdu = cpu_to_be64(pdu);
+		__be64 rpdu = cpu_to_be64(depth);
 
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
 				sizeof(rpdu), &rpdu);
 	}
 }
 
-static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
-{
-	struct blk_trace *bt = q->blk_trace;
-
-	if (bt) {
-		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-		__be64 rpdu = cpu_to_be64(pdu);
-
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
-				sizeof(rpdu), &rpdu);
-	}
-}
-
 static void blk_add_trace_split(void *ignore,
 				struct request_queue *q, struct bio *bio,
 				unsigned int pdu)
@@ -1015,8 +1002,6 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
 	WARN_ON(ret);
-	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
-	WARN_ON(ret);
 	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_split(blk_add_trace_split, NULL);
@@ -1033,7 +1018,6 @@ static void blk_unregister_tracepoints(void)
 	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	unregister_trace_block_split(blk_add_trace_split, NULL);
 	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
-	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
 	unregister_trace_block_plug(blk_add_trace_plug, NULL);
 	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
 	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
@@ -1348,7 +1332,6 @@ static const struct {
 	[__BLK_TA_COMPLETE]	= {{ "C",  "complete" },     blk_log_with_error },
 	[__BLK_TA_PLUG]		= {{ "P",  "plug" },         blk_log_plug },
 	[__BLK_TA_UNPLUG_IO]	= {{ "U",  "unplug_io" },    blk_log_unplug },
-	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
 	[__BLK_TA_INSERT]	= {{ "I",  "insert" },       blk_log_generic },
 	[__BLK_TA_SPLIT]	= {{ "X",  "split" },        blk_log_split },
 	[__BLK_TA_BOUNCE]	= {{ "B",  "bounce" },       blk_log_generic },
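With the explicit unplug timer gone, the remaining unplug_io probe no longer guesses the queue depth from q->rq.count[]; the block layer passes in the number of requests it is about to dispatch. A hypothetical call-site sketch for illustration only (the function name and surrounding code are assumptions, not taken from this diff; only the tracepoint name and its new depth argument come from the hunks above):

/*
 * Hypothetical caller: the plug-flush path counts how many plugged
 * requests it is dispatching and hands that count to the tracepoint,
 * which blk_add_trace_unplug_io() then records as a big-endian PDU.
 */
static void flush_plugged_requests(struct request_queue *q, unsigned int depth)
{
	trace_block_unplug_io(q, depth);	/* depth = requests being unplugged */

	/* ... dispatch the plugged requests to the driver ... */
}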