author	Dave Airlie <airlied@redhat.com>	2014-12-01 19:58:33 -0500
committer	Dave Airlie <airlied@redhat.com>	2014-12-01 19:58:33 -0500
commit	e8115e79aa62b6ebdb3e8e61ca4092cc32938afc (patch)
tree	42b791ab54ef9d5c73dcd49f907b8b37fa2f7e19 /kernel
parent	9be23ae4350bfd71c0cc2ea3494671ee90e5603b (diff)
parent	009d0431c3914de64666bec0d350e54fdd59df6a (diff)
Merge tag 'v3.18-rc7' into drm-next
This fixes a bunch of conflicts prior to merging the i915 tree.

Linux 3.18-rc7

Conflicts:
	drivers/gpu/drm/exynos/exynos_drm_drv.c
	drivers/gpu/drm/i915/i915_drv.c
	drivers/gpu/drm/i915/intel_pm.c
	drivers/gpu/drm/tegra/dc.c
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit.c	2
-rw-r--r--	kernel/audit_tree.c	1
-rw-r--r--	kernel/events/core.c	8
-rw-r--r--	kernel/events/uprobes.c	1
-rw-r--r--	kernel/panic.c	1
-rw-r--r--	kernel/power/suspend.c	4
-rw-r--r--	kernel/sched/core.c	63
-rw-r--r--	kernel/sched/deadline.c	2
-rw-r--r--	kernel/sched/fair.c	14
-rw-r--r--	kernel/sched/idle_task.c	5
-rw-r--r--	kernel/sched/rt.c	2
-rw-r--r--	kernel/sched/sched.h	2
-rw-r--r--	kernel/sched/stop_task.c	5
-rw-r--r--	kernel/time/posix-cpu-timers.c	2
-rw-r--r--	kernel/trace/ring_buffer.c	81
-rw-r--r--	kernel/trace/trace.c	33
16 files changed, 131 insertions, 95 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 80983df92cd4..cebb11db4d34 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -739,7 +739,7 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
 	audit_log_task_info(ab, current);
-	audit_log_format(ab, "feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
+	audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
 			 audit_feature_names[which], !!old_feature, !!new_feature,
 			 !!old_lock, !!new_lock, res);
 	audit_log_end(ab);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index e242e3a9864a..80f29e015570 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
 		chunk->owners[i].index = i;
 	}
 	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+	chunk->mark.mask = FS_IN_IGNORED;
 	return chunk;
 }
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2b02c9fda790..1cd5eef1fcdd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 
 	if (!task) {
 		/*
-		 * Per cpu events are removed via an smp call and
-		 * the removal is always successful.
+		 * Per cpu events are removed via an smp call. The removal can
+		 * fail if the CPU is currently offline, but in that case we
+		 * already called __perf_remove_from_context from
+		 * perf_event_exit_cpu.
 		 */
 		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
 		return;
@@ -8117,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
-	struct remove_event re = { .detach_group = false };
+	struct remove_event re = { .detach_group = true };
 	struct perf_event_context *ctx = __info;
 
 	perf_pmu_rotate_stop(ctx->pmu);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index bc143cf56cab..d2a5689a6b2e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1640,7 +1640,6 @@ bool uprobe_deny_signal(void)
 	if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
 		utask->state = UTASK_SSTEP_TRAPPED;
 		set_tsk_thread_flag(t, TIF_UPROBE);
-		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
 	}
 }
 
diff --git a/kernel/panic.c b/kernel/panic.c
index d09dc5c32c67..cf80672b7924 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -244,6 +244,7 @@ static const struct tnt tnts[] = {
  *  'I' - Working around severe firmware bug.
  *  'O' - Out-of-tree module has been loaded.
  *  'E' - Unsigned module has been loaded.
+ *  'L' - A soft lockup has previously occurred.
  *
  *	The string is overwritten by the next call to print_tainted().
  */
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4ca9a33ff620..c347e3ce3a55 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -146,7 +146,7 @@ static int platform_suspend_prepare(suspend_state_t state)
 
 static int platform_suspend_prepare_late(suspend_state_t state)
 {
-	return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ?
+	return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
 		freeze_ops->prepare() : 0;
 }
 
@@ -164,7 +164,7 @@ static void platform_resume_noirq(suspend_state_t state)
 
 static void platform_resume_early(suspend_state_t state)
 {
-	if (state == PM_SUSPEND_FREEZE && freeze_ops->restore)
+	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
 		freeze_ops->restore();
 }
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 240157c13ddc..24beb9bb4c3e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
 /*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-	u64 ns = 0;
-
-	/*
-	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
-	 * project cycles that may never be accounted to this
-	 * thread, breaking clock_gettime().
-	 */
-	if (task_current(rq, p) && task_on_rq_queued(p)) {
-		update_rq_clock(rq);
-		ns = rq_clock_task(rq) - p->se.exec_start;
-		if ((s64)ns < 0)
-			ns = 0;
-	}
-
-	return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns = 0;
-
-	rq = task_rq_lock(p, &flags);
-	ns = do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
-/*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
  * pending runtime that have not been accounted yet.
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
 	unsigned long flags;
 	struct rq *rq;
-	u64 ns = 0;
+	u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
 	/*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
 	rq = task_rq_lock(p, &flags);
-	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+	/*
+	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
+	 * project cycles that may never be accounted to this
+	 * thread, breaking clock_gettime().
+	 */
+	if (task_current(rq, p) && task_on_rq_queued(p)) {
+		update_rq_clock(rq);
+		p->sched_class->update_curr(rq);
+	}
+	ns = p->se.sum_exec_runtime;
 	task_rq_unlock(rq, p, &flags);
 
 	return ns;
@@ -6368,6 +6339,10 @@ static void sched_init_numa(void)
 		if (!sched_debug())
 			break;
 	}
+
+	if (!level)
+		return;
+
 	/*
 	 * 'level' contains the number of unique distances, excluding the
 	 * identity distance node_distance(i,i).
@@ -7444,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		put_prev_task(rq, tsk);
 
-	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-				lockdep_is_held(&tsk->sighand->siglock)),
+	/*
+	 * All callers are synchronized by task_rq_lock(); we do not use RCU
+	 * which is pointless here. Thus, we pass "true" to task_css_check()
+	 * to prevent lockdep warnings.
+	 */
+	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
 	tsk->sched_task_group = tg;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 5285332392d5..28fa9d9e9201 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1701,4 +1701,6 @@ const struct sched_class dl_sched_class = {
 	.prio_changed		= prio_changed_dl,
 	.switched_from		= switched_from_dl,
 	.switched_to		= switched_to_dl,
+
+	.update_curr		= update_curr_dl,
 };
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 34baa60f8a7b..ef2b104b254c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+static void update_curr_fair(struct rq *rq)
+{
+	update_curr(cfs_rq_of(&rq->curr->se));
+}
+
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -1180,6 +1185,13 @@ static void task_numa_compare(struct task_numa_env *env,
 	raw_spin_unlock_irq(&dst_rq->lock);
 
 	/*
+	 * Because we have preemption enabled we can get migrated around and
+	 * end try selecting ourselves (current == env->p) as a swap candidate.
+	 */
+	if (cur == env->p)
+		goto unlock;
+
+	/*
 	 * "imp" is the fault differential for the source task between the
 	 * source and destination node. Calculate the total differential for
 	 * the source task and potential destination task. The more negative
@@ -7949,6 +7961,8 @@ const struct sched_class fair_sched_class = {
 
 	.get_rr_interval	= get_rr_interval_fair,
 
+	.update_curr		= update_curr_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.task_move_group	= task_move_group_fair,
 #endif
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 67ad4e7f506a..c65dac8c97cd 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -75,6 +75,10 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task
 	return 0;
 }
 
+static void update_curr_idle(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -101,4 +105,5 @@ const struct sched_class idle_sched_class = {
 
 	.prio_changed		= prio_changed_idle,
 	.switched_to		= switched_to_idle,
+	.update_curr		= update_curr_idle,
 };
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d024e6ce30ba..20bca398084a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2128,6 +2128,8 @@ const struct sched_class rt_sched_class = {
 
 	.prio_changed		= prio_changed_rt,
 	.switched_to		= switched_to_rt,
+
+	.update_curr		= update_curr_rt,
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 24156c8434d1..2df8ef067cc5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1135,6 +1135,8 @@ struct sched_class {
 	unsigned int (*get_rr_interval) (struct rq *rq,
 					 struct task_struct *task);
 
+	void (*update_curr) (struct rq *rq);
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 67426e529f59..79ffec45a6ac 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -102,6 +102,10 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task)
 	return 0;
 }
 
+static void update_curr_stop(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU stop tasks:
  */
@@ -128,4 +132,5 @@ const struct sched_class stop_sched_class = {
 
 	.prio_changed		= prio_changed_stop,
 	.switched_to		= switched_to_stop,
+	.update_curr		= update_curr_stop,
 };
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 492b986195d5..a16b67859e2a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -553,7 +553,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
 		*sample = cputime_to_expires(cputime.utime);
 		break;
 	case CPUCLOCK_SCHED:
-		*sample = cputime.sum_exec_runtime + task_delta_exec(p);
+		*sample = cputime.sum_exec_runtime;
 		break;
 	}
 	return 0;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2d75c94ae87d..a56e07c8d15b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * ring_buffer_wait - wait for input to the ring buffer
  * @buffer: buffer to wait on
  * @cpu: the cpu buffer to wait on
+ * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 {
-	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 	DEFINE_WAIT(wait);
 	struct rb_irq_work *work;
+	int ret = 0;
 
 	/*
 	 * Depending on what the caller is waiting for, either any
@@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 	}
 
 
-	prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+	while (true) {
+		prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 
-	/*
-	 * The events can happen in critical sections where
-	 * checking a work queue can cause deadlocks.
-	 * After adding a task to the queue, this flag is set
-	 * only to notify events to try to wake up the queue
-	 * using irq_work.
-	 *
-	 * We don't clear it even if the buffer is no longer
-	 * empty. The flag only causes the next event to run
-	 * irq_work to do the work queue wake up. The worse
-	 * that can happen if we race with !trace_empty() is that
-	 * an event will cause an irq_work to try to wake up
-	 * an empty queue.
-	 *
-	 * There's no reason to protect this flag either, as
-	 * the work queue and irq_work logic will do the necessary
-	 * synchronization for the wake ups. The only thing
-	 * that is necessary is that the wake up happens after
-	 * a task has been queued. It's OK for spurious wake ups.
-	 */
-	work->waiters_pending = true;
+		/*
+		 * The events can happen in critical sections where
+		 * checking a work queue can cause deadlocks.
+		 * After adding a task to the queue, this flag is set
+		 * only to notify events to try to wake up the queue
+		 * using irq_work.
+		 *
+		 * We don't clear it even if the buffer is no longer
+		 * empty. The flag only causes the next event to run
+		 * irq_work to do the work queue wake up. The worse
+		 * that can happen if we race with !trace_empty() is that
+		 * an event will cause an irq_work to try to wake up
+		 * an empty queue.
+		 *
+		 * There's no reason to protect this flag either, as
+		 * the work queue and irq_work logic will do the necessary
+		 * synchronization for the wake ups. The only thing
+		 * that is necessary is that the wake up happens after
+		 * a task has been queued. It's OK for spurious wake ups.
+		 */
+		work->waiters_pending = true;
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+			break;
+
+		if (cpu != RING_BUFFER_ALL_CPUS &&
+		    !ring_buffer_empty_cpu(buffer, cpu)) {
+			unsigned long flags;
+			bool pagebusy;
+
+			if (!full)
+				break;
+
+			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+			if (!pagebusy)
+				break;
+		}
 
-	if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
-	    (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
 		schedule();
+	}
 
 	finish_wait(&work->waiters, &wait);
-	return 0;
+
+	return ret;
 }
 
 /**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8a528392b1f4..92f4a6cee172 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static int wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter, bool full)
 {
 	/* Iterators are static, they should be filled or empty */
 	if (trace_buffer_iter(iter, iter->cpu_file))
 		return 0;
 
-	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+				full);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp)
 
 		mutex_unlock(&iter->mutex);
 
-		ret = wait_on_pipe(iter);
+		ret = wait_on_pipe(iter, false);
 
 		mutex_lock(&iter->mutex);
 
 		if (ret)
 			return ret;
-
-		if (signal_pending(current))
-			return -EINTR;
 	}
 
 	return 1;
@@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 			goto out_unlock;
 		}
 		mutex_unlock(&trace_types_lock);
-		ret = wait_on_pipe(iter);
+		ret = wait_on_pipe(iter, false);
 		mutex_lock(&trace_types_lock);
 		if (ret) {
 			size = ret;
 			goto out_unlock;
 		}
-		if (signal_pending(current)) {
-			size = -EINTR;
-			goto out_unlock;
-		}
 		goto again;
 	}
 	size = 0;
@@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	};
 	struct buffer_ref *ref;
 	int entries, size, i;
-	ssize_t ret;
+	ssize_t ret = 0;
 
 	mutex_lock(&trace_types_lock);
 
@@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		int r;
 
 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-		if (!ref)
+		if (!ref) {
+			ret = -ENOMEM;
 			break;
+		}
 
 		ref->ref = 1;
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 		if (!ref->page) {
+			ret = -ENOMEM;
 			kfree(ref);
 			break;
 		}
@@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
 	/* did we read anything? */
 	if (!spd.nr_pages) {
+		if (ret)
+			goto out;
+
 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
 			ret = -EAGAIN;
 			goto out;
 		}
 		mutex_unlock(&trace_types_lock);
-		ret = wait_on_pipe(iter);
+		ret = wait_on_pipe(iter, true);
 		mutex_lock(&trace_types_lock);
 		if (ret)
 			goto out;
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			goto out;
-		}
+
 		goto again;
 	}
 