author     Peter Zijlstra <peterz@infradead.org>      2009-03-24 08:18:16 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-04-06 03:30:32 -0400
commit     c7138f37f905bb7987b1f9f5a8ee73667db39f25
tree       8d1c2bf404f1179d35e8b35b0166c7bf0b39840e  /kernel/perf_counter.c
parent     f66c6b2066b44d4ab8e8ac1ee4cae543738fe2ac
perf_counter: fix perf_poll()
Impact: fix kerneltop 100% CPU usage
Only return a poll event when there has actually been one: poll_wait()
doesn't actually wait on the waitq you pass it, it merely enqueues you
on it. Only once all FDs have been iterated and none of them returned a
poll event will it schedule().
Also make it return POLL_HUP when there is no mmap() area to read from.
Further, fix a silly bug in the write code.
Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arjan van de Ven <arjan@infradead.org>
Orig-LKML-Reference: <1237897096.24918.181.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
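For readers unfamiliar with the symptom: a poll()-driven consumer such as
kerneltop blocks in poll() and only reads when the kernel reports readiness,
so a poll handler that unconditionally reports POLLIN turns that loop into a
busy spin. A minimal userspace sketch of such a loop (the fd setup and the
event_loop() helper are hypothetical, not kerneltop's actual code):

  #include <poll.h>
  #include <unistd.h>

  /*
   * Minimal sketch of a kerneltop-style event loop; `fd` is assumed to
   * be an open perf counter file descriptor. If the kernel reports
   * POLLIN unconditionally, poll() returns immediately on every pass
   * and this loop spins at 100% CPU even with no new events.
   */
  static void event_loop(int fd)
  {
          struct pollfd pfd = { .fd = fd, .events = POLLIN };
          char buf[4096];

          for (;;) {
                  if (poll(&pfd, 1, -1) < 0)
                          break;
                  if (pfd.revents & POLLIN)
                          read(fd, buf, sizeof(buf)); /* consume events */
                  if (pfd.revents & (POLLHUP | POLLERR))
                          break; /* counter torn down */
          }
  }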
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 14 ++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0dfe91094fd1..affe227d56a0 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1161,7 +1161,16 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 static unsigned int perf_poll(struct file *file, poll_table *wait)
 {
 	struct perf_counter *counter = file->private_data;
-	unsigned int events = POLLIN;
+	struct perf_mmap_data *data;
+	unsigned int events;
+
+	rcu_read_lock();
+	data = rcu_dereference(counter->data);
+	if (data)
+		events = atomic_xchg(&data->wakeup, 0);
+	else
+		events = POLL_HUP;
+	rcu_read_unlock();
 
 	poll_wait(file, &counter->waitq, wait);
 
@@ -1425,7 +1434,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi,
 
 	do {
 		offset = head = atomic_read(&data->head);
-		head += sizeof(u64);
+		head += size;
 	} while (atomic_cmpxchg(&data->head, offset, head) != offset);
 
 	wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
@@ -1446,6 +1455,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi,
 	 * generate a poll() wakeup for every page boundary crossed
 	 */
 	if (wakeup) {
+		atomic_xchg(&data->wakeup, POLL_IN);
 		__perf_counter_update_userpage(counter, data);
 		if (nmi) {
 			counter->wakeup_pending = 1;
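The write-side fix is easiest to see as the lock-free reservation pattern the
cmpxchg loop implements. Below is a sketch of that pattern using C11 atomics
rather than the kernel's atomic_read()/atomic_cmpxchg() API; reserve() is a
hypothetical stand-in for the reservation step inside perf_output_write():

  #include <stdatomic.h>

  /*
   * Sketch of the ring-buffer reservation step, redone with C11
   * atomics. Each writer claims the range [offset, offset + size) by
   * advancing `head`; retrying until the compare-and-swap succeeds
   * guarantees concurrent writers end up with disjoint ranges.
   */
  static unsigned int reserve(atomic_uint *head, unsigned int size)
  {
          unsigned int offset = atomic_load(head);

          /* On failure, atomic_compare_exchange_weak reloads *head
           * into offset, so each retry uses the current value. */
          while (!atomic_compare_exchange_weak(head, &offset, offset + size))
                  ;

          return offset; /* start of this writer's private region */
  }

With the pre-fix `head += sizeof(u64)`, a record larger than 8 bytes would
extend past its reservation into the region the next cmpxchg winner claims,
corrupting the buffer; advancing by the full record `size` keeps concurrent
claims disjoint.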