author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-02 10:16:02 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-06-02 15:45:32 -0400
commit		8e3747c13c39246c7e46def7cf495d9d21d4c5f9 (patch)
tree		6a57736dd784947dbd40b75674effcb63e6696a1
parent		e4abb5d4f7ddabc1fc7c392cf0a10d8e5868c9ca (diff)
perf_counter: Change data head from u32 to u64
Since some people worried that 4G might not be large enough as an mmap data window, extend it to 64 bit for capable platforms.

Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/perf_counter.h |  7 ++++---
 kernel/perf_counter.c        | 15 ++++++++-------
 2 files changed, 12 insertions(+), 10 deletions(-)
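The user-visible side of the change is the mmap control page: consumers now read a 64-bit data_head and, per the comment in the header below, issue an rmb() after the read on SMP. A minimal consumer-side sketch, assuming the kernel's perf_counter.h is visible to userspace; read_data_head is a hypothetical helper, not kernel API, and __sync_synchronize() is only a portable stand-in for rmb():

	#include <stdint.h>
	#include <linux/perf_counter.h>	/* struct perf_counter_mmap_page */

	/* Fetch the current head of the mmap'ed data area. With data_head
	 * now __u64, a long-running session can move the head past 4G
	 * without the position wrapping. */
	static uint64_t read_data_head(volatile struct perf_counter_mmap_page *pg)
	{
		uint64_t head = pg->data_head;

		/* The header asks for an rmb() after reading data_head;
		 * __sync_synchronize() is a (stronger) full barrier used
		 * here as a portable stand-in. */
		__sync_synchronize();

		return head;
	}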
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index cef9931793fd..c046f7d97cfa 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -212,7 +212,7 @@ struct perf_counter_mmap_page {
 	 * User-space reading this value should issue an rmb(), on SMP capable
 	 * platforms, after reading this value -- see perf_counter_wakeup().
 	 */
-	__u32	data_head;		/* head in the data section */
+	__u64	data_head;		/* head in the data section */
 };
 
 #define PERF_EVENT_MISC_CPUMODE_MASK	(3 << 0)
@@ -397,10 +397,11 @@ struct perf_mmap_data {
 	int			nr_locked;	/* nr pages mlocked */
 
 	atomic_t		poll;		/* POLL_ for wakeups */
-	atomic_t		head;		/* write position */
 	atomic_t		events;		/* event limit */
 
-	atomic_t		done_head;	/* completed head */
+	atomic_long_t		head;		/* write position */
+	atomic_long_t		done_head;	/* completed head */
+
 	atomic_t		lock;		/* concurrent writes */
 
 	atomic_t		wakeup;		/* needs a wakeup */
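The kernel-side counterpart of the __u64 user field is atomic_long_t, which follows the machine word: 64-bit kernels get a head wide enough for a window beyond 4G, while 32-bit kernels keep their native word (their mmap window cannot exceed 4G anyway), which is also what the (long) casts later in the patch assume. An illustrative, kernel-side compile-time check of that assumption; perf_head_width_check is hypothetical and not part of the patch:

	#include <linux/kernel.h>	/* BUILD_BUG_ON() */
	#include <asm/atomic.h>		/* atomic_long_t */

	/* Illustrative only: atomic_long_t is machine-word sized on both
	 * 32-bit and 64-bit, so head arithmetic done in longs never
	 * truncates the stored position. */
	static inline void perf_head_width_check(void)
	{
		BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
	}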
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 5ecd9981c035..3f11a2bc6c79 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2067,8 +2067,8 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 struct perf_output_handle {
 	struct perf_counter	*counter;
 	struct perf_mmap_data	*data;
-	unsigned int		offset;
-	unsigned int		head;
+	unsigned long		head;
+	unsigned long		offset;
 	int			nmi;
 	int			overflow;
 	int			locked;
@@ -2122,7 +2122,8 @@ static void perf_output_lock(struct perf_output_handle *handle)
 static void perf_output_unlock(struct perf_output_handle *handle)
 {
 	struct perf_mmap_data *data = handle->data;
-	int head, cpu;
+	unsigned long head;
+	int cpu;
 
 	data->done_head = data->head;
 
@@ -2135,7 +2136,7 @@ again:
 	 * before we publish the new head, matched by a rmb() in userspace when
 	 * reading this position.
 	 */
-	while ((head = atomic_xchg(&data->done_head, 0)))
+	while ((head = atomic_long_xchg(&data->done_head, 0)))
 		data->user_page->data_head = head;
 
 	/*
@@ -2148,7 +2149,7 @@ again:
 	/*
 	 * Therefore we have to validate we did not indeed do so.
 	 */
-	if (unlikely(atomic_long_read(&data->done_head))) {
+	if (unlikely(atomic_long_read(&data->done_head))) {
 		/*
 		 * Since we had it locked, we can lock it again.
 		 */
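perf_output_unlock() above publishes through a mailbox: each finishing writer stores its completed head in done_head, and the lock holder drains that mailbox with an atomic exchange, forwarding every value it observes into the user page; the re-check of done_head after unlocking catches an NMI writer that deposited in between. The same drain pattern as a standalone sketch, with hypothetical names and GCC __atomic built-ins standing in for atomic_long_xchg():

	#include <stdint.h>

	struct user_page { volatile uint64_t data_head; };

	static long done_head;	/* mailbox: newest completed head, 0 = none */

	/* Drain pending completions into the user-visible head; mirrors
	 * the while ((head = atomic_long_xchg(...))) loop in the patch. */
	static void drain_done_head(struct user_page *pg)
	{
		long head;

		while ((head = __atomic_exchange_n(&done_head, 0, __ATOMIC_SEQ_CST)))
			pg->data_head = (uint64_t)head;
	}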
@@ -2195,7 +2196,7 @@ static int perf_output_begin(struct perf_output_handle *handle,
 	do {
 		offset = head = atomic_read(&data->head);
 		head += size;
-	} while (atomic_cmpxchg(&data->head, offset, head) != offset);
+	} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
 
 	handle->offset = offset;
 	handle->head = head;
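The reservation in perf_output_begin() is a lock-free fetch-and-add built from cmpxchg: snapshot head, add the record size, and retry until the swap lands, so concurrent writers (including NMI context, where taking a spinlock is not an option) each end up owning a disjoint [offset, head) window. A minimal standalone sketch of the pattern, with hypothetical names and GCC __atomic built-ins in place of atomic_long_cmpxchg():

	static unsigned long ring_head;	/* free-running write position */

	/* Reserve 'size' bytes; the caller owns [offset, offset + size). */
	static unsigned long ring_reserve(unsigned long size)
	{
		unsigned long offset = __atomic_load_n(&ring_head, __ATOMIC_RELAXED);
		unsigned long head;

		do {
			head = offset + size;
			/* On failure, 'offset' is refreshed with the value
			 * another writer installed, so we retry from there. */
		} while (!__atomic_compare_exchange_n(&ring_head, &offset, head,
						      0, __ATOMIC_RELAXED,
						      __ATOMIC_RELAXED));
		return offset;
	}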
@@ -2246,7 +2247,7 @@ static void perf_output_copy(struct perf_output_handle *handle,
 	 * Check we didn't copy past our reservation window, taking the
 	 * possible unsigned int wrap into account.
 	 */
-	WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);
+	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
 }
 
 #define perf_output_put(handle, x)					\
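This check leans on the positions being free-running: head and offset only ever increase, wrapping modulo the word size, so within a valid reservation the unsigned difference head - offset is small and its signed view non-negative even across a wrap; only copying past the reserved window drives it negative. Widening the cast from int to long matches the now word-sized positions. A toy, self-contained userspace demonstration of the arithmetic:

	#include <assert.h>

	int main(void)
	{
		/* Positions are free-running and wrap modulo the word size. */
		unsigned long head   = 0x10UL;		/* reservation end, post-wrap */
		unsigned long offset = ~0UL - 0x10UL;	/* copy cursor, pre-wrap */

		/* In-window: the unsigned difference stays small, so the
		 * signed view is non-negative even across the wrap. */
		assert((long)(head - offset) >= 0);

		/* Overrun: the cursor passes the reservation end and the
		 * signed difference goes negative, which is what the
		 * patch's WARN_ON_ONCE() catches. */
		offset = head + 8;
		assert((long)(head - offset) < 0);

		return 0;
	}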