diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-03-25 07:30:27 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-06 03:30:35 -0400 |
commit | 00f0ad73ac90e3fba8b4cbe4cf21b2fb9a56cb72 (patch) | |
tree | 0d2eb8e299a2eb2362955820e3339090824dc05a | |
parent | 4c4ba21d2c3659e4c0421533939b58a8fd9f06c9 (diff) |
perf_counter: kerneltop: output event support
Teach kerneltop about the new output ABI.
XXX: anybody fancy integrating the PID/TID data into the output?
Bump the mmap_data pages a little because we bloated the output and
have to be more careful about overruns with structured data.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Orig-LKML-Reference: <20090325113317.192910290@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | Documentation/perf_counter/kerneltop.c | 65 |
1 file changed, 59 insertions(+), 6 deletions(-)
diff --git a/Documentation/perf_counter/kerneltop.c b/Documentation/perf_counter/kerneltop.c index 3e45bf6591b2..fda1438365dc 100644 --- a/Documentation/perf_counter/kerneltop.c +++ b/Documentation/perf_counter/kerneltop.c | |||
@@ -134,6 +134,11 @@ | |||
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | #define unlikely(x) __builtin_expect(!!(x), 0) | 136 | #define unlikely(x) __builtin_expect(!!(x), 0) |
137 | #define min(x, y) ({ \ | ||
138 | typeof(x) _min1 = (x); \ | ||
139 | typeof(y) _min2 = (y); \ | ||
140 | (void) (&_min1 == &_min2); \ | ||
141 | _min1 < _min2 ? _min1 : _min2; }) | ||
137 | 142 | ||
138 | asmlinkage int sys_perf_counter_open( | 143 | asmlinkage int sys_perf_counter_open( |
139 | struct perf_counter_hw_event *hw_event_uptr __user, | 144 | struct perf_counter_hw_event *hw_event_uptr __user, |
@@ -178,7 +183,7 @@ static int nr_cpus = 0; | |||
178 | static int nmi = 1; | 183 | static int nmi = 1; |
179 | static int group = 0; | 184 | static int group = 0; |
180 | static unsigned int page_size; | 185 | static unsigned int page_size; |
181 | static unsigned int mmap_pages = 4; | 186 | static unsigned int mmap_pages = 16; |
182 | 187 | ||
183 | static char *vmlinux; | 188 | static char *vmlinux; |
184 | 189 | ||
@@ -1147,28 +1152,75 @@ static void mmap_read(struct mmap_data *md) | |||
1147 | unsigned int head = mmap_read_head(md); | 1152 | unsigned int head = mmap_read_head(md); |
1148 | unsigned int old = md->prev; | 1153 | unsigned int old = md->prev; |
1149 | unsigned char *data = md->base + page_size; | 1154 | unsigned char *data = md->base + page_size; |
1155 | int diff; | ||
1150 | 1156 | ||
1151 | gettimeofday(&this_read, NULL); | 1157 | gettimeofday(&this_read, NULL); |
1152 | 1158 | ||
1153 | if (head - old > md->mask) { | 1159 | /* |
1160 | * If we're further behind than half the buffer, there's a chance | ||
1161 | * the writer will bite our tail and screw up the events under us. | ||
1162 | * | ||
1163 | * If we somehow ended up ahead of the head, we got messed up. | ||
1164 | * | ||
1165 | * In either case, truncate and restart at head. | ||
1166 | */ | ||
1167 | diff = head - old; | ||
1168 | if (diff > md->mask / 2 || diff < 0) { | ||
1154 | struct timeval iv; | 1169 | struct timeval iv; |
1155 | unsigned long msecs; | 1170 | unsigned long msecs; |
1156 | 1171 | ||
1157 | timersub(&this_read, &last_read, &iv); | 1172 | timersub(&this_read, &last_read, &iv); |
1158 | msecs = iv.tv_sec*1000 + iv.tv_usec/1000; | 1173 | msecs = iv.tv_sec*1000 + iv.tv_usec/1000; |
1159 | 1174 | ||
1160 | fprintf(stderr, "WARNING: failed to keep up with mmap data. Last read %lu msecs ago.\n", msecs); | 1175 | fprintf(stderr, "WARNING: failed to keep up with mmap data." |
1176 | " Last read %lu msecs ago.\n", msecs); | ||
1161 | 1177 | ||
1178 | /* | ||
1179 | * head points to a known good entry, start there. | ||
1180 | */ | ||
1162 | old = head; | 1181 | old = head; |
1163 | } | 1182 | } |
1164 | 1183 | ||
1165 | last_read = this_read; | 1184 | last_read = this_read; |
1166 | 1185 | ||
1167 | for (; old != head;) { | 1186 | for (; old != head;) { |
1168 | __u64 *ptr = (__u64 *)&data[old & md->mask]; | 1187 | struct event_struct { |
1169 | old += sizeof(__u64); | 1188 | struct perf_event_header header; |
1189 | __u64 ip; | ||
1190 | __u32 pid, tid; | ||
1191 | } *event = (struct event_struct *)&data[old & md->mask]; | ||
1192 | struct event_struct event_copy; | ||
1193 | |||
1194 | unsigned int size = event->header.size; | ||
1195 | |||
1196 | /* | ||
1197 | * Event straddles the mmap boundary -- header should always | ||
1198 | * be inside due to u64 alignment of output. | ||
1199 | */ | ||
1200 | if ((old & md->mask) + size != ((old + size) & md->mask)) { | ||
1201 | unsigned int offset = old; | ||
1202 | unsigned int len = sizeof(*event), cpy; | ||
1203 | void *dst = &event_copy; | ||
1204 | |||
1205 | do { | ||
1206 | cpy = min(md->mask + 1 - (offset & md->mask), len); | ||
1207 | memcpy(dst, &data[offset & md->mask], cpy); | ||
1208 | offset += cpy; | ||
1209 | dst += cpy; | ||
1210 | len -= cpy; | ||
1211 | } while (len); | ||
1212 | |||
1213 | event = &event_copy; | ||
1214 | } | ||
1170 | 1215 | ||
1171 | process_event(*ptr, md->counter); | 1216 | old += size; |
1217 | |||
1218 | switch (event->header.type) { | ||
1219 | case PERF_EVENT_IP: | ||
1220 | case PERF_EVENT_IP | __PERF_EVENT_TID: | ||
1221 | process_event(event->ip, md->counter); | ||
1222 | break; | ||
1223 | } | ||
1172 | } | 1224 | } |
1173 | 1225 | ||
1174 | md->prev = old; | 1226 | md->prev = old; |
@@ -1214,6 +1266,7 @@ int main(int argc, char *argv[]) | |||
1214 | hw_event.irq_period = event_count[counter]; | 1266 | hw_event.irq_period = event_count[counter]; |
1215 | hw_event.record_type = PERF_RECORD_IRQ; | 1267 | hw_event.record_type = PERF_RECORD_IRQ; |
1216 | hw_event.nmi = nmi; | 1268 | hw_event.nmi = nmi; |
1269 | hw_event.include_tid = 1; | ||
1217 | 1270 | ||
1218 | fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); | 1271 | fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0); |
1219 | if (fd[i][counter] < 0) { | 1272 | if (fd[i][counter] < 0) { |