author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-03-30 13:07:03 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-04-06 03:30:37 -0400
commit    38ff667b321b00f5e6830e93fb4ab11a653a2920 (patch)
tree      37c24148228d978824a014899f4984072da4e077 /kernel/perf_counter.c
parent    925d519ab82b6dd7aca9420d809ee83819c08db2 (diff)
perf_counter: fix update_userpage()
It just occurred to me it is possible to have multiple contending
updates of the userpage (mmap information vs overflow vs counter).
This would break the seqlock logic.

It appears the arch code uses this from NMI context, so we cannot
possibly serialize its use, therefore separate the data_head update
from it and let it return to its original use.

The arch code needs to make sure there are no contending callers by
disabling the counter before using it -- powerpc appears to do this
nicely.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090330171023.241410660@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
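For reference, the user-space side of the seqlock-style protocol this patch
protects reads the mmap'ed control page roughly as below. This is a minimal
sketch, not code from the patch: struct counter_page abbreviates the real
perf_counter_mmap_page layout, and barrier() stands in for a compiler
barrier. Nested updates interleave increments of userpg->lock, so a reader
could observe an apparently stable sequence count around half-written
data -- which is why contending callers must be excluded rather than
serialized.

#include <stdint.h>

#define barrier()	asm volatile("" ::: "memory")

/* Abbreviated view of the mmap'ed page; the real layout is in the ABI header. */
struct counter_page {
	uint32_t lock;		/* seqlock count, bumped around each update */
	uint32_t index;		/* hardware counter the task is bound to    */
	int64_t  offset;	/* add to the hardware counter value        */
	uint32_t data_head;	/* write position in the mmap data section  */
};

static int64_t read_offset(volatile struct counter_page *pc)
{
	uint32_t seq;
	int64_t count;

	do {
		seq = pc->lock;		/* snapshot the sequence count */
		barrier();
		count = pc->offset;	/* a full reader would also read the
					 * hardware counter via pc->index */
		barrier();
	} while (pc->lock != seq);	/* retry if a writer intervened */

	return count;
}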
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f70ff80e79d7..c95e92329b97 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1316,10 +1316,22 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	return err;
 }
 
-static void __perf_counter_update_userpage(struct perf_counter *counter,
-					   struct perf_mmap_data *data)
+/*
+ * Callers need to ensure there can be no nesting of this function, otherwise
+ * the seqlock logic goes bad. We can not serialize this because the arch
+ * code calls this from NMI context.
+ */
+void perf_counter_update_userpage(struct perf_counter *counter)
 {
-	struct perf_counter_mmap_page *userpg = data->user_page;
+	struct perf_mmap_data *data;
+	struct perf_counter_mmap_page *userpg;
+
+	rcu_read_lock();
+	data = rcu_dereference(counter->data);
+	if (!data)
+		goto unlock;
+
+	userpg = data->user_page;
 
 	/*
 	 * Disable preemption so as to not let the corresponding user-space
@@ -1333,20 +1345,10 @@ static void __perf_counter_update_userpage(struct perf_counter *counter,
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		userpg->offset -= atomic64_read(&counter->hw.prev_count);
 
-	userpg->data_head = atomic_read(&data->head);
 	smp_wmb();
 	++userpg->lock;
 	preempt_enable();
-}
-
-void perf_counter_update_userpage(struct perf_counter *counter)
-{
-	struct perf_mmap_data *data;
-
-	rcu_read_lock();
-	data = rcu_dereference(counter->data);
-	if (data)
-		__perf_counter_update_userpage(counter, data);
+unlock:
 	rcu_read_unlock();
 }
 
@@ -1547,7 +1549,13 @@ void perf_counter_wakeup(struct perf_counter *counter)
 	data = rcu_dereference(counter->data);
 	if (data) {
 		(void)atomic_xchg(&data->wakeup, POLL_IN);
-		__perf_counter_update_userpage(counter, data);
+		/*
+		 * Ensure all data writes are issued before updating the
+		 * user-space data head information. The matching rmb()
+		 * will be in userspace after reading this value.
+		 */
+		smp_wmb();
+		data->user_page->data_head = atomic_read(&data->head);
 	}
 	rcu_read_unlock();
 
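The comment added in the last hunk names the other half of the new ordering:
the kernel issues smp_wmb() before publishing data_head, so a consumer must
order its data_head load before it reads the ring contents. A sketch under
the same assumptions as above (rmb() is a placeholder for the architecture's
read barrier; ring wrap-around handling is omitted):

#define rmb()	asm volatile("" ::: "memory")	/* a compiler barrier suffices
						 * for load-load ordering on
						 * x86; weaker-ordered archs
						 * need a real read fence */

static void drain(volatile struct counter_page *pc,
		  const unsigned char *ring, uint32_t *tail)
{
	uint32_t head = pc->data_head;	/* published by the kernel */

	rmb();		/* pairs with the kernel's smp_wmb(): order the
			 * head load before reading the ring contents */

	while (*tail != head) {
		/* consume ring[*tail] here */
		(*tail)++;
	}
}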