about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorJiri Olsa <jolsa@kernel.org>2014-09-12 07:18:26 -0400
committerIngo Molnar <mingo@kernel.org>2014-09-24 08:48:11 -0400
commitdc633982ff3f4fd74cdc11b5a6ae53d39a0b2451 (patch)
tree756b335d9cdf75ee9e3f4dd4bdfc21e05518f361 /kernel
parent4f7cf3a992cc0c15c97d2e34ea08a1cb7faace39 (diff)
perf: Do not POLLHUP event if it has children
Currently we return POLLHUP in event polling if the monitored process is done, but we don't consider possible children that might still be running and producing data.

Before returning POLLHUP, make sure that:

  1) the monitored task has exited, and
  2) we don't have any children to monitor.

Also add a parent wakeup when the child event is gone.

Suggested-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1410520708-19275-1-git-send-email-jolsa@kernel.org
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/events/core.c21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 733c61636f0d..15e58d4ea035 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3587,6 +3587,19 @@ static int perf_event_read_one(struct perf_event *event,
 	return n * sizeof(u64);
 }
 
+static bool is_event_hup(struct perf_event *event)
+{
+	bool no_children;
+
+	if (event->state != PERF_EVENT_STATE_EXIT)
+		return false;
+
+	mutex_lock(&event->child_mutex);
+	no_children = list_empty(&event->child_list);
+	mutex_unlock(&event->child_mutex);
+	return no_children;
+}
+
 /*
  * Read the performance event - simple non blocking version for now
  */
@@ -3632,7 +3645,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &event->waitq, wait);
 
-	if (event->state == PERF_EVENT_STATE_EXIT)
+	if (is_event_hup(event))
 		return events;
 
 	/*
@@ -7580,6 +7593,12 @@ static void sync_child_event(struct perf_event *child_event,
 	mutex_unlock(&parent_event->child_mutex);
 
 	/*
+	 * Make sure user/parent get notified, that we just
+	 * lost one event.
+	 */
+	perf_event_wakeup(parent_event);
+
+	/*
 	 * Release the parent event, if this was the last
 	 * reference to it.
 	 */