author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-03-30 13:07:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-06 03:30:37 -0400
commit	38ff667b321b00f5e6830e93fb4ab11a653a2920 (patch)
tree	37c24148228d978824a014899f4984072da4e077 /include/linux/perf_counter.h
parent	925d519ab82b6dd7aca9420d809ee83819c08db2 (diff)
perf_counter: fix update_userpage()
It just occurred to me that it is possible to have multiple contending updates of the userpage (mmap information vs overflow vs counter). This would break the seqlock logic.

It appears the arch code uses this from NMI context, so we cannot possibly serialize its use; therefore separate the data_head update from it and let it return to its original use.

The arch code needs to make sure there are no contending callers by disabling the counter before using it -- powerpc appears to do this nicely.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090330171023.241410660@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
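For illustration, a minimal user-space sketch of the seqlock read sequence documented by the new comment in the hunk below might look as follows. The struct and field names come from the header; rmb(), cpu_relax() and read_raw_counter() (the architecture specific raw counter read, e.g. RDPMC on x86) are assumed to be supplied by the reader and are not part of this patch:

#include <linux/perf_counter.h>	/* assumed available for the struct layout */

/* Hypothetical stand-in for the architecture specific counter read. */
extern __u64 read_raw_counter(__u32 hw_index);

/*
 * Sketch only: pc points at the mmap()ed first page of the counter.
 * rmb() and cpu_relax() are assumed user-space equivalents of the
 * kernel barriers.
 */
static __s64 read_counter_value(volatile struct perf_counter_mmap_page *pc)
{
	__u32 seq, index;
	__s64 offset;

again:
	rmb();
	seq = pc->lock;

	if (seq & 1) {			/* odd: an update is in progress */
		cpu_relax();
		goto again;
	}

	index  = pc->index;
	offset = pc->offset;

	rmb();
	if (pc->lock != seq)		/* raced with an update, retry */
		goto again;

	if (!index)			/* 0 means no hardware counter available */
		return -1;

	return offset + (__s64)read_raw_counter(index - 1);
}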
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--  include/linux/perf_counter.h | 35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 0d833228eee5..8ac18852dcfe 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -160,10 +160,45 @@ struct perf_counter_hw_event {
 struct perf_counter_mmap_page {
 	__u32	version;		/* version number of this structure */
 	__u32	compat_version;		/* lowest version this is compat with */
+
+	/*
+	 * Bits needed to read the hw counters in user-space.
+	 *
+	 * The index and offset should be read atomically using the seqlock:
+	 *
+	 *   __u32 seq, index;
+	 *   __s64 offset;
+	 *
+	 * again:
+	 *   rmb();
+	 *   seq = pc->lock;
+	 *
+	 *   if (unlikely(seq & 1)) {
+	 *     cpu_relax();
+	 *     goto again;
+	 *   }
+	 *
+	 *   index = pc->index;
+	 *   offset = pc->offset;
+	 *
+	 *   rmb();
+	 *   if (pc->lock != seq)
+	 *     goto again;
+	 *
+	 * After this, index contains architecture specific counter index + 1,
+	 * so that 0 means unavailable, offset contains the value to be added
+	 * to the result of the raw timer read to obtain this counter's value.
+	 */
 	__u32	lock;			/* seqlock for synchronization */
 	__u32	index;			/* hardware counter identifier */
 	__s64	offset;			/* add to hardware counter value */
 
+	/*
+	 * Control data for the mmap() data buffer.
+	 *
+	 * User-space reading this value should issue an rmb(), on SMP capable
+	 * platforms, after reading this value -- see perf_counter_wakeup().
+	 */
 	__u32	data_head;		/* head in the data section */
 };
 
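For completeness, a hedged sketch of how a user-space reader might honour the data_head comment above when consuming the mmap() data buffer. The layout assumption (control page followed by the data pages), the process_event() helper and the lack of wrap-around handling are simplifications for illustration, not part of this patch:

#include <unistd.h>
#include <linux/perf_counter.h>	/* assumed available for the struct layout */

/* Hypothetical event handler; returns the number of bytes consumed. */
extern size_t process_event(const void *event);

/*
 * Sketch only: base is the start of the mmap()ed area.  rmb() is
 * assumed to be a suitable user-space read memory barrier.  Buffer
 * wrap-around is ignored for brevity.
 */
static __u32 drain_events(void *base, __u32 tail)
{
	volatile struct perf_counter_mmap_page *pc = base;
	unsigned char *data = (unsigned char *)base + sysconf(_SC_PAGESIZE);
	__u32 head;

	head = pc->data_head;
	rmb();			/* order the data reads after reading data_head */

	/* bytes in [tail, head) have been published by the kernel */
	while (tail != head)
		tail += process_event(data + tail);

	return tail;
}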