author    Ingo Molnar <mingo@elte.hu>  2012-03-05 03:20:08 -0500
committer Ingo Molnar <mingo@elte.hu>  2012-03-05 03:20:08 -0500
commit    737f24bda723fdf89ecaacb99fa2bf5683c32799 (patch)
tree      35495fff3e9956679cb5468e74e6814c8e44ee66 /kernel
parent    8eedce996556d7d06522cd3a0e6069141c8dffe0 (diff)
parent    b7c924274c456499264d1cfa3d44063bb11eb5db (diff)
Merge branch 'perf/urgent' into perf/core
Conflicts:
	tools/perf/builtin-record.c
	tools/perf/builtin-top.c
	tools/perf/perf.h
	tools/perf/util/top.h

Merge reason: resolve these cherry-picking conflicts.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c           19
-rw-r--r--  kernel/events/hw_breakpoint.c   4
-rw-r--r--  kernel/fork.c                   7
-rw-r--r--  kernel/irq/autoprobe.c          4
-rw-r--r--  kernel/irq/chip.c              42
-rw-r--r--  kernel/irq/internals.h          2
-rw-r--r--  kernel/irq/manage.c             2
-rw-r--r--  kernel/kprobes.c                6
-rw-r--r--  kernel/params.c                 3
-rw-r--r--  kernel/pid.c                    4
-rw-r--r--  kernel/power/power.h           24
-rw-r--r--  kernel/power/process.c          7
-rw-r--r--  kernel/power/user.c             6
-rw-r--r--  kernel/relay.c                 10
-rw-r--r--  kernel/sched/core.c             1
-rw-r--r--  kernel/sched/fair.c             2
16 files changed, 108 insertions, 35 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5e0f8bb89b2b..e8b32ac75ce3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2303,7 +2303,7 @@ do { \
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		return;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		/*
 		 * restart the event
 		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
 		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -4567,7 +4576,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*
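The perf change threads a new 'disable' flag through perf_adjust_period(): the tick-time unthrottle loop has already stopped the event (and now disables the whole PMU around the loop), so it passes false, while the overflow path passes true because its event is still running. A minimal userspace sketch of the two call patterns (plain C with hypothetical names such as fake_event; not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct fake_event { bool running; int stops; int starts; };

static void ev_stop(struct fake_event *e)  { e->running = false; e->stops++; }
static void ev_start(struct fake_event *e) { e->running = true;  e->starts++; }

/* Mirrors the reworked perf_adjust_period(): only touch the PMU
 * when the caller says the event is still running. */
static void adjust_period(struct fake_event *e, bool disable)
{
	if (disable)
		ev_stop(e);
	/* ... recompute sample period, clear period_left ... */
	if (disable)
		ev_start(e);
}

int main(void)
{
	struct fake_event e = { .running = true };

	/* Tick path: caller already stopped the event, so pass false. */
	ev_stop(&e);
	adjust_period(&e, false);
	ev_start(&e);

	/* Overflow path: event still running, so pass true. */
	adjust_period(&e, true);

	printf("stops=%d starts=%d\n", e.stops, e.starts); /* 2 and 2 */
	return 0;
}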
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b0309f76d777..3330022a7ac1 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -658,10 +658,10 @@ int __init init_hw_breakpoint(void)
 
  err_alloc:
 	for_each_possible_cpu(err_cpu) {
-		if (err_cpu == cpu)
-			break;
 		for (i = 0; i < TYPE_MAX; i++)
 			kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+		if (err_cpu == cpu)
+			break;
 	}
 
 	return -ENOMEM;
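The hw_breakpoint fix reorders the error path: the kfree() loop must run before the 'err_cpu == cpu' break, otherwise whatever the failing CPU had already allocated leaks. An illustrative userspace analogue (hypothetical table layout, not the kernel per-cpu code):

#include <stdlib.h>

#define NCPUS    4
#define TYPE_MAX 2

static void *table[NCPUS][TYPE_MAX];

/* Mirrors the reordered err_alloc loop: free a cpu's entries first,
 * then check whether it is the one that failed, so the failing cpu's
 * partial allocations are released too. */
static void unwind(int failed_cpu)
{
	for (int c = 0; c < NCPUS; c++) {
		for (int i = 0; i < TYPE_MAX; i++) {
			free(table[c][i]);
			table[c][i] = NULL;
		}
		if (c == failed_cpu)
			break;
	}
}

int main(void)
{
	/* Simulate: cpus 0..1 fully allocated, cpu 2 failed half-way. */
	for (int c = 0; c <= 2; c++)
		for (int i = 0; i < TYPE_MAX && !(c == 2 && i == 1); i++)
			table[c][i] = malloc(16);

	unwind(2); /* with the old ordering, table[2][0] would leak */
	return 0;
}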
diff --git a/kernel/fork.c b/kernel/fork.c
index 1b2ef3c23ae4..e2cd3e2a5ae8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -66,6 +66,7 @@
 #include <linux/user-return-notifier.h>
 #include <linux/oom.h>
 #include <linux/khugepaged.h>
+#include <linux/signalfd.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -910,7 +911,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 			return -ENOMEM;
 
 		new_ioc->ioprio = ioc->ioprio;
-		put_io_context(new_ioc, NULL);
+		put_io_context(new_ioc);
 	}
 #endif
 	return 0;
@@ -935,8 +936,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 
 void __cleanup_sighand(struct sighand_struct *sighand)
 {
-	if (atomic_dec_and_test(&sighand->count))
+	if (atomic_dec_and_test(&sighand->count)) {
+		signalfd_cleanup(sighand);
 		kmem_cache_free(sighand_cachep, sighand);
+	}
 }
 
 
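The fork.c change makes the final reference drop run signalfd_cleanup() before the sighand struct is freed, so signalfd waiters are woken before their wait queue disappears. A small sketch of the general "hook before free on last ref" shape (userspace C, hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct shared { int refcount; };

static void cleanup_hook(struct shared *s)
{
	/* stands in for signalfd_cleanup(): notify waiters while the
	 * object is still valid */
	printf("notify waiters before free (refs=%d)\n", s->refcount);
}

static void put_shared(struct shared *s)
{
	if (--s->refcount == 0) {	/* stands in for atomic_dec_and_test() */
		cleanup_hook(s);
		free(s);
	}
}

int main(void)
{
	struct shared *s = malloc(sizeof(*s));
	s->refcount = 2;
	put_shared(s);	/* still referenced: nothing happens */
	put_shared(s);	/* last ref: hook runs, then free */
	return 0;
}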
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 342d8f44e401..0119b9d467ae 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
 			if (desc->irq_data.chip->irq_set_type)
 				desc->irq_data.chip->irq_set_type(&desc->irq_data,
 							 IRQ_TYPE_PROBE);
-			irq_startup(desc);
+			irq_startup(desc, false);
 		}
 		raw_spin_unlock_irq(&desc->lock);
 	}
@@ -70,7 +70,7 @@ unsigned long probe_irq_on(void)
 		raw_spin_lock_irq(&desc->lock);
 		if (!desc->action && irq_settings_can_probe(desc)) {
 			desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
-			if (irq_startup(desc))
+			if (irq_startup(desc, false))
 				desc->istate |= IRQS_PENDING;
 		}
 		raw_spin_unlock_irq(&desc->lock);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fc418249f01f..838687ed802a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -159,19 +159,22 @@ static void irq_state_set_masked(struct irq_desc *desc)
 	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 }
 
-int irq_startup(struct irq_desc *desc)
+int irq_startup(struct irq_desc *desc, bool resend)
 {
+	int ret = 0;
+
 	irq_state_clr_disabled(desc);
 	desc->depth = 0;
 
 	if (desc->irq_data.chip->irq_startup) {
-		int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
 		irq_state_clr_masked(desc);
-		return ret;
+	} else {
+		irq_enable(desc);
 	}
-
-	irq_enable(desc);
-	return 0;
+	if (resend)
+		check_irq_resend(desc, desc->irq_data.irq);
+	return ret;
 }
 
 void irq_shutdown(struct irq_desc *desc)
@@ -332,6 +335,24 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(handle_simple_irq);
 
+/*
+ * Called unconditionally from handle_level_irq() and only for oneshot
+ * interrupts from handle_fasteoi_irq()
+ */
+static void cond_unmask_irq(struct irq_desc *desc)
+{
+	/*
+	 * We need to unmask in the following cases:
+	 * - Standard level irq (IRQF_ONESHOT is not set)
+	 * - Oneshot irq which did not wake the thread (caused by a
+	 *   spurious interrupt or a primary handler handling it
+	 *   completely).
+	 */
+	if (!irqd_irq_disabled(&desc->irq_data) &&
+	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
+		unmask_irq(desc);
+}
+
 /**
  *	handle_level_irq - Level type irq handler
  *	@irq:	the interrupt number
@@ -364,8 +385,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
 	handle_irq_event(desc);
 
-	if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
-		unmask_irq(desc);
+	cond_unmask_irq(desc);
+
 out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
@@ -419,6 +440,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 		preflow_handler(desc);
 	handle_irq_event(desc);
 
+	if (desc->istate & IRQS_ONESHOT)
+		cond_unmask_irq(desc);
+
 out_eoi:
 	desc->irq_data.chip->irq_eoi(&desc->irq_data);
 out_unlock:
@@ -627,7 +651,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		irq_settings_set_noprobe(desc);
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
-		irq_startup(desc, true);
+		irq_startup(desc, true);
 	}
 out:
 	irq_put_desc_busunlock(desc, flags);
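irq_startup() now also takes a 'resend' flag so callers that (re)enable a line (__setup_irq(), __irq_set_handler()) can have a pending interrupt replayed via check_irq_resend(), while the autoprobe paths pass false. The new cond_unmask_irq() centralizes the unmask decision described in its comment; reduced to a predicate, it looks like this (illustrative sketch only, not the kernel types):

#include <stdbool.h>
#include <stdio.h>

/* Unmask only when the line is not disabled, is currently masked, and
 * no oneshot thread still owns it. */
static bool should_unmask(bool disabled, bool masked, unsigned threads_oneshot)
{
	return !disabled && masked && !threads_oneshot;
}

int main(void)
{
	/* level irq, handler done, no oneshot thread pending -> unmask */
	printf("%d\n", should_unmask(false, true, 0));  /* 1 */
	/* oneshot irq whose thread was woken -> stay masked until it ends */
	printf("%d\n", should_unmask(false, true, 1));  /* 0 */
	return 0;
}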
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b7952316016a..40378ff877e7 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -67,7 +67,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
-extern int irq_startup(struct irq_desc *desc);
+extern int irq_startup(struct irq_desc *desc, bool resend);
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a9a9dbe49fea..32313c084442 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1027,7 +1027,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		desc->istate |= IRQS_ONESHOT;
 
 	if (irq_settings_can_autoenable(desc))
-		irq_startup(desc);
+		irq_startup(desc, true);
 	else
 		/* Undo nested disables: */
 		desc->depth = 1;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 29f5b65bee29..9788c0ec6f43 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1673,8 +1673,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 		ri->rp = rp;
 		ri->task = current;
 
-		if (rp->entry_handler && rp->entry_handler(ri, regs))
+		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+			raw_spin_lock_irqsave(&rp->lock, flags);
+			hlist_add_head(&ri->hlist, &rp->free_instances);
+			raw_spin_unlock_irqrestore(&rp->lock, flags);
 			return 0;
+		}
 
 		arch_prepare_kretprobe(ri, regs);
 
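The kprobes fix recycles the kretprobe instance back onto rp->free_instances when the entry handler declines the probe; previously each declined hit leaked an instance until the pool was exhausted. A userspace analogue of the free-list recycling (hypothetical names, no locking):

#include <stdio.h>

struct instance { struct instance *next; };

static struct instance pool[4];
static struct instance *free_list;

static struct instance *take(void)
{
	struct instance *ri = free_list;
	if (ri)
		free_list = ri->next;
	return ri;
}

static void give_back(struct instance *ri)
{
	ri->next = free_list;
	free_list = ri;
}

static int handler_rejects(void) { return 1; }

int main(void)
{
	for (int i = 0; i < 4; i++)
		give_back(&pool[i]);

	for (int hit = 0; hit < 100; hit++) {
		struct instance *ri = take();
		if (!ri) {
			puts("pool drained (the pre-fix leak)");
			return 1;
		}
		if (handler_rejects()) {
			give_back(ri);	/* the added recycling step */
			continue;
		}
		/* ... arm the return probe ... */
	}
	puts("pool intact");
	return 0;
}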
diff --git a/kernel/params.c b/kernel/params.c
index 32ee04308285..4bc965d8a1fe 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -97,7 +97,8 @@ static int parse_one(char *param,
 	for (i = 0; i < num_params; i++) {
 		if (parameq(param, params[i].name)) {
 			/* No one handled NULL, so do it here. */
-			if (!val && params[i].ops->set != param_set_bool)
+			if (!val && params[i].ops->set != param_set_bool
+			    && params[i].ops->set != param_set_bint)
 				return -EINVAL;
 			pr_debug("They are equal! Calling %p\n",
 				 params[i].ops->set);
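parse_one() previously allowed a bare parameter (no '=value', i.e. val == NULL) only for param_set_bool; the fix extends this to param_set_bint, which accepts bool-or-int syntax. A compact model of the check (userspace C with stand-in setters, not the kernel ops tables):

#include <stdio.h>

typedef int (*setter_t)(const char *val);

static int set_bool(const char *val) { (void)val; return 0; }
static int set_bint(const char *val) { (void)val; return 0; }
static int set_int(const char *val)  { return val ? 0 : -1; }

static int parse_one(const char *val, setter_t set)
{
	/* mirrors: if (!val && set != param_set_bool && set != param_set_bint) */
	if (!val && set != set_bool && set != set_bint)
		return -22; /* -EINVAL */
	return set(val);
}

int main(void)
{
	printf("%d\n", parse_one(NULL, set_bool)); /* 0: "foo" alone is fine  */
	printf("%d\n", parse_one(NULL, set_bint)); /* 0: now also accepted    */
	printf("%d\n", parse_one(NULL, set_int));  /* -22: still needs =value */
	return 0;
}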
diff --git a/kernel/pid.c b/kernel/pid.c
index ce8e00deaccb..9f08dfabaf13 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -543,12 +543,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
  */
 void __init pidhash_init(void)
 {
-	int i, pidhash_size;
+	unsigned int i, pidhash_size;
 
 	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
 					   HASH_EARLY | HASH_SMALL,
 					   &pidhash_shift, NULL, 4096);
-	pidhash_size = 1 << pidhash_shift;
+	pidhash_size = 1U << pidhash_shift;
 
 	for (i = 0; i < pidhash_size; i++)
 		INIT_HLIST_HEAD(&pid_hash[i]);
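The pid.c change is a signedness cleanup: '1 << pidhash_shift' is a signed int, which is undefined behaviour if the shift ever reaches 31, and the signed loop index would then compare against a negative bound. '1U <<' keeps the whole computation unsigned and well defined (in practice pidhash_shift is capped well below 31, so this is defensive). A short illustration:

#include <stdio.h>

int main(void)
{
	unsigned int shift = 31;

	unsigned int ok = 1U << shift;	/* 2147483648, well defined */
	printf("unsigned: %u\n", ok);

	/* int bad = 1 << shift;  -- signed overflow: undefined behaviour */
	return 0;
}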
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 0c4defe6d3b8..21724eee5206 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -231,8 +231,28 @@ extern int pm_test_level;
 #ifdef CONFIG_SUSPEND_FREEZER
 static inline int suspend_freeze_processes(void)
 {
-	int error = freeze_processes();
-	return error ? : freeze_kernel_threads();
+	int error;
+
+	error = freeze_processes();
+
+	/*
+	 * freeze_processes() automatically thaws every task if freezing
+	 * fails. So we need not do anything extra upon error.
+	 */
+	if (error)
+		goto Finish;
+
+	error = freeze_kernel_threads();
+
+	/*
+	 * freeze_kernel_threads() thaws only kernel threads upon freezing
+	 * failure. So we have to thaw the userspace tasks ourselves.
+	 */
+	if (error)
+		thaw_processes();
+
+ Finish:
+	return error;
 }
 
 static inline void suspend_thaw_processes(void)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index eeca00311f39..7e426459e60a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -143,7 +143,10 @@ int freeze_processes(void)
 /**
  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
  *
- * On success, returns 0. On failure, -errno and system is fully thawed.
+ * On success, returns 0. On failure, -errno and only the kernel threads are
+ * thawed, so as to give a chance to the caller to do additional cleanups
+ * (if any) before thawing the userspace tasks. So, it is the responsibility
+ * of the caller to thaw the userspace tasks, when the time is right.
  */
 int freeze_kernel_threads(void)
 {
@@ -159,7 +162,7 @@ int freeze_kernel_threads(void)
 	BUG_ON(in_atomic());
 
 	if (error)
-		thaw_processes();
+		thaw_kernel_threads();
 	return error;
 }
 
diff --git a/kernel/power/user.c b/kernel/power/user.c
index e5a21a857302..3e100075b13c 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -249,13 +249,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		}
 		pm_restore_gfp_mask();
 		error = hibernation_snapshot(data->platform_support);
-		if (!error) {
+		if (error) {
+			thaw_kernel_threads();
+		} else {
 			error = put_user(in_suspend, (int __user *)arg);
 			if (!error && !freezer_test_done)
 				data->ready = 1;
 			if (freezer_test_done) {
 				freezer_test_done = false;
-				thaw_processes();
+				thaw_kernel_threads();
 			}
 		}
 		break;
diff --git a/kernel/relay.c b/kernel/relay.c
index 4335e1d7ee2d..ab56a1764d4d 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -164,10 +164,14 @@ depopulate:
  */
 static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
-	struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
-	if (!buf)
+	struct rchan_buf *buf;
+
+	if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
 		return NULL;
 
+	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+	if (!buf)
+		return NULL;
 	buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
 	if (!buf->padding)
 		goto free_buf;
@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename,
 
 	if (!(subbuf_size && n_subbufs))
 		return NULL;
+	if (subbuf_size > UINT_MAX / n_subbufs)
+		return NULL;
 
 	chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
 	if (!chan)
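Both relay checks use the standard pre-multiplication overflow guard: instead of computing a * b and testing the (already wrapped) result, test a > MAX / b first. A self-contained demonstration of the idiom:

#include <limits.h>
#include <stdio.h>

static int alloc_size_ok(unsigned int subbuf_size, unsigned int n_subbufs)
{
	if (n_subbufs == 0)
		return 0;
	/* would subbuf_size * n_subbufs exceed UINT_MAX? */
	if (subbuf_size > UINT_MAX / n_subbufs)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", alloc_size_ok(4096, 8));         /* 1: fits       */
	printf("%d\n", alloc_size_ok(UINT_MAX / 2, 3)); /* 0: would wrap */
	return 0;
}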
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 112c6824476b..6c41ba49767a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1932,7 +1932,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
-	trace_sched_stat_sleeptime(current, rq->clock);
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 423547ada38a..fd974faf467d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1003,6 +1003,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		if (unlikely(delta > se->statistics.sleep_max))
 			se->statistics.sleep_max = delta;
 
+		se->statistics.sleep_start = 0;
 		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
@@ -1019,6 +1020,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		if (unlikely(delta > se->statistics.block_max))
 			se->statistics.block_max = delta;
 
+		se->statistics.block_start = 0;
 		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
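With the sleeptime tracepoint gone from finish_task_switch() (see the sched/core.c hunk above), enqueue_sleeper() must clear sleep_start/block_start itself once the delta has been folded into sum_sleep_runtime, so the same interval cannot be accounted twice. The consume-once pattern in miniature (illustrative userspace C):

#include <stdio.h>

static unsigned long long sleep_start, sum_sleep;

static void account_sleep(unsigned long long now)
{
	if (!sleep_start)
		return;               /* timestamp already consumed */
	sum_sleep += now - sleep_start;
	sleep_start = 0;              /* the added reset */
}

int main(void)
{
	sleep_start = 100;
	account_sleep(250);           /* adds 150 */
	account_sleep(400);           /* no-op: already consumed */
	printf("%llu\n", sum_sleep);  /* 150 */
	return 0;
}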