author		Ingo Molnar <mingo@elte.hu>	2009-06-05 10:50:29 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-05 10:50:29 -0400
commit		918143e8b7d6153d7a83a3f854323407939f4a7e (patch)
tree		34c0b1434516d146fc3811a6d071b052dd59342b /kernel/trace
parent		64edbc562034f2ec3fce382cb208fab40586d005 (diff)
parent		563af16c30ede41eda2d614195d88e07f7c7103d (diff)
Merge branch 'tip/tracing/ftrace-4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ring_buffer.c	104
-rw-r--r--	kernel/trace/trace_output.c	35
-rw-r--r--	kernel/trace/trace_stack.c	2
3 files changed, 81 insertions, 60 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 16b24d49604c..7102d7a2fadb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -370,6 +370,9 @@ static inline int test_time_stamp(u64 delta)
 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 
+/* Max number of timestamps that can fit on a page */
+#define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
+
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
 	struct buffer_data_page field;
@@ -1335,6 +1338,38 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 }
 
+static inline int
+rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+		  struct ring_buffer_event *event)
+{
+	unsigned long new_index, old_index;
+	struct buffer_page *bpage;
+	unsigned long index;
+	unsigned long addr;
+
+	new_index = rb_event_index(event);
+	old_index = new_index + rb_event_length(event);
+	addr = (unsigned long)event;
+	addr &= PAGE_MASK;
+
+	bpage = cpu_buffer->tail_page;
+
+	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
+		/*
+		 * This is on the tail page. It is possible that
+		 * a write could come in and move the tail page
+		 * and write to the next page. That is fine
+		 * because we just shorten what is on this page.
+		 */
+		index = local_cmpxchg(&bpage->write, old_index, new_index);
+		if (index == old_index)
+			return 1;
+	}
+
+	/* could not discard */
+	return 0;
+}
+
 static int
 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		  u64 *ts, u64 *delta)
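rb_try_to_discard() above only reclaims space when the event is still the last thing reserved on the tail page: it checks that the page's write index equals the event's end offset and, if so, uses local_cmpxchg() to wind the index back to the event's start. A stand-alone sketch of that idea follows, using C11 atomics in place of the kernel's local_t/local_cmpxchg(); struct page_sim and try_discard_last() are illustrative names, not kernel interfaces.

	#include <stdatomic.h>
	#include <stdio.h>

	struct page_sim {
		atomic_ulong write;	/* current write offset into the page */
	};

	/* Try to hand back [start, end) if it is still the last reservation. */
	static int try_discard_last(struct page_sim *p, unsigned long start,
				    unsigned long end)
	{
		unsigned long expected = end;

		/*
		 * If another writer has already moved the offset past 'end',
		 * the compare-and-swap fails and the caller must zero out or
		 * pad the event in place instead.
		 */
		return atomic_compare_exchange_strong(&p->write, &expected, start);
	}

	int main(void)
	{
		struct page_sim p = { .write = 64 };

		printf("discard: %d, write is now %lu\n",
		       try_discard_last(&p, 32, 64),
		       (unsigned long)atomic_load(&p.write));
		printf("second discard: %d (stale, nothing changes)\n",
		       try_discard_last(&p, 32, 64));
		return 0;
	}

When the compare-and-swap loses the race, the callers below fall back to zeroing the event in place (rb_add_time_stamp) or leaving it in the buffer as an already-discarded event (ring_buffer_discard_commit).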
@@ -1377,17 +1412,24 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 			event->array[0] = *delta >> TS_SHIFT;
 		} else {
 			cpu_buffer->commit_page->page->time_stamp = *ts;
-			event->time_delta = 0;
-			event->array[0] = 0;
+			/* try to discard, since we do not need this */
+			if (!rb_try_to_discard(cpu_buffer, event)) {
+				/* nope, just zero it */
+				event->time_delta = 0;
+				event->array[0] = 0;
+			}
 		}
 		cpu_buffer->write_stamp = *ts;
 		/* let the caller know this was the commit */
 		ret = 1;
 	} else {
-		/* Darn, this is just wasted space */
-		event->time_delta = 0;
-		event->array[0] = 0;
-		ret = 0;
+		/* Try to discard the event */
+		if (!rb_try_to_discard(cpu_buffer, event)) {
+			/* Darn, this is just wasted space */
+			event->time_delta = 0;
+			event->array[0] = 0;
+			ret = 0;
+		}
 	}
 
 	*delta = 0;
@@ -1682,10 +1724,6 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	unsigned long new_index, old_index;
-	struct buffer_page *bpage;
-	unsigned long index;
-	unsigned long addr;
 	int cpu;
 
 	/* The event is discarded regardless */
@@ -1701,24 +1739,8 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	cpu = smp_processor_id();
 	cpu_buffer = buffer->buffers[cpu];
 
-	new_index = rb_event_index(event);
-	old_index = new_index + rb_event_length(event);
-	addr = (unsigned long)event;
-	addr &= PAGE_MASK;
-
-	bpage = cpu_buffer->tail_page;
-
-	if (bpage == (void *)addr && rb_page_write(bpage) == old_index) {
-		/*
-		 * This is on the tail page. It is possible that
-		 * a write could come in and move the tail page
-		 * and write to the next page. That is fine
-		 * because we just shorten what is on this page.
-		 */
-		index = local_cmpxchg(&bpage->write, old_index, new_index);
-		if (index == old_index)
-			goto out;
-	}
+	if (!rb_try_to_discard(cpu_buffer, event))
+		goto out;
 
 	/*
 	 * The commit is still visible by the reader, so we
@@ -2253,8 +2275,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 	 * Check if we are at the end of the buffer.
 	 */
 	if (iter->head >= rb_page_size(iter->head_page)) {
-		if (RB_WARN_ON(buffer,
-			       iter->head_page == cpu_buffer->commit_page))
+		/* discarded commits can make the page empty */
+		if (iter->head_page == cpu_buffer->commit_page)
 			return;
 		rb_inc_iter(iter);
 		return;
@@ -2297,12 +2319,10 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	/*
 	 * We repeat when a timestamp is encountered. It is possible
 	 * to get multiple timestamps from an interrupt entering just
-	 * as one timestamp is about to be written. The max times
-	 * that this can happen is the number of nested interrupts we
-	 * can have. Nesting 10 deep of interrupts is clearly
-	 * an anomaly.
+	 * as one timestamp is about to be written, or from discarded
+	 * commits. The most that we can have is the number on a single page.
 	 */
-	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
 		return NULL;
 
 	reader = rb_get_reader_page(cpu_buffer);
@@ -2368,14 +2388,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 
  again:
 	/*
-	 * We repeat when a timestamp is encountered. It is possible
-	 * to get multiple timestamps from an interrupt entering just
-	 * as one timestamp is about to be written. The max times
-	 * that this can happen is the number of nested interrupts we
-	 * can have. Nesting 10 deep of interrupts is clearly
-	 * an anomaly.
+	 * We repeat when a timestamp is encountered.
+	 * We can get multiple timestamps by nested interrupts or also
+	 * if filtering is on (discarding commits). Since discarding
+	 * commits can be frequent we can get a lot of timestamps.
+	 * But we limit them by not adding timestamps if they begin
+	 * at the start of a page.
 	 */
-	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
 		return NULL;
 
 	if (rb_per_cpu_empty(cpu_buffer))
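Both peek loops above now bound their retries by RB_TIMESTAMPS_PER_PAGE instead of the old hard-coded 10, since discarded commits can legitimately leave many time-stamp events in a row. A rough illustration of the size of that bound, assuming a 4K page and treating the header and time-stamp event sizes below as stand-in values (the real constants live in ring_buffer.c):

	#include <stdio.h>

	#define PAGE_SIZE		4096UL
	#define BUF_PAGE_HDR_SIZE	16UL	/* struct buffer_data_page header (assumed) */
	#define BUF_PAGE_SIZE		(PAGE_SIZE - BUF_PAGE_HDR_SIZE)
	#define RB_LEN_TIME_STAMP	16UL	/* size of a time-stamp event (assumed) */
	#define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)

	int main(void)
	{
		/* The old code warned after 10 retries; the new bound is per-page. */
		printf("RB_TIMESTAMPS_PER_PAGE = %lu\n", RB_TIMESTAMPS_PER_PAGE);
		return 0;
	}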
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 0fe3b223f7ed..425725c1622d 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -223,10 +223,9 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 {
 	unsigned long mask;
 	const char *str;
+	const char *ret = p->buffer + p->len;
 	int i;
 
-	trace_seq_init(p);
-
 	for (i = 0; flag_array[i].name && flags; i++) {
 
 		mask = flag_array[i].mask;
@@ -249,7 +248,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 
 	trace_seq_putc(p, 0);
 
-	return p->buffer;
+	return ret;
 }
 EXPORT_SYMBOL(ftrace_print_flags_seq);
 
@@ -258,8 +257,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 			 const struct trace_print_flags *symbol_array)
 {
 	int i;
+	const char *ret = p->buffer + p->len;
 
-	trace_seq_init(p);
-
 	for (i = 0; symbol_array[i].name; i++) {
 
@@ -275,7 +273,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 
 	trace_seq_putc(p, 0);
 
-	return p->buffer;
+	return ret;
 }
 EXPORT_SYMBOL(ftrace_print_symbols_seq);
 
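Instead of reinitializing the trace_seq and returning p->buffer, ftrace_print_flags_seq() and ftrace_print_symbols_seq() now append to whatever is already in the buffer and return a pointer to where their own output begins (p->buffer + p->len, captured before writing). A minimal user-space sketch of that pattern, with struct seq_sim and append() as illustrative stand-ins for struct trace_seq and trace_seq_puts():

	#include <stdio.h>

	struct seq_sim {
		char buffer[256];
		int len;
	};

	static void append(struct seq_sim *p, const char *s)
	{
		p->len += snprintf(p->buffer + p->len,
				   sizeof(p->buffer) - p->len, "%s", s);
	}

	/* Decode one value and return a pointer to just this call's output. */
	static const char *print_symbol(struct seq_sim *p, unsigned long val)
	{
		const char *ret = p->buffer + p->len;	/* where our output starts */

		append(p, val & 1 ? "READ" : "WRITE");
		p->len++;	/* keep the NUL, like trace_seq_putc(p, 0) */
		return ret;
	}

	int main(void)
	{
		struct seq_sim s = { .len = 0 };
		const char *a = print_symbol(&s, 1);
		const char *b = print_symbol(&s, 2);

		/* Earlier results stay valid: the buffer is never reset. */
		printf("%s %s\n", a, b);
		return 0;
	}

Because the buffer is never reset, a caller can hold on to earlier return values while later flags or symbols are decoded into the same trace_seq.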
@@ -389,17 +387,20 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
 
 		if (ip == ULONG_MAX || !ret)
 			break;
-		if (i && ret)
-			ret = trace_seq_puts(s, " <- ");
+		if (ret)
+			ret = trace_seq_puts(s, " => ");
 		if (!ip) {
 			if (ret)
 				ret = trace_seq_puts(s, "??");
+			if (ret)
+				ret = trace_seq_puts(s, "\n");
 			continue;
 		}
 		if (!ret)
 			break;
 		if (ret)
 			ret = seq_print_user_ip(s, mm, ip, sym_flags);
+		ret = trace_seq_puts(s, "\n");
 	}
 
 	if (mm)
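seq_print_userip_objs() now emits one frame per line, prefixed with " => " and terminated with a newline, printing "??" for frames it cannot resolve. A small user-space sketch of the new loop shape, where resolve() is a hypothetical stand-in for seq_print_user_ip():

	#include <limits.h>
	#include <stdio.h>

	/* Hypothetical stand-in for seq_print_user_ip(): just print the address. */
	static int resolve(unsigned long ip)
	{
		return printf("%#lx", ip) > 0;
	}

	static void print_user_stack(const unsigned long *stack, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			unsigned long ip = stack[i];

			if (ip == ULONG_MAX)
				break;
			printf(" => ");
			if (!ip) {
				/* unresolvable frame, mirrors the "??" case above */
				printf("??\n");
				continue;
			}
			if (!resolve(ip))
				return;
			printf("\n");
		}
	}

	int main(void)
	{
		unsigned long stack[] = { 0x400123UL, 0UL, 0xbfff1234UL, ULONG_MAX };

		print_user_stack(stack, 4);
		return 0;
	}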
@@ -975,16 +976,16 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 
 	trace_assign_type(field, iter->ent);
 
+	if (!trace_seq_puts(s, "<stack trace>\n"))
+		goto partial;
 	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-		if (!field->caller[i])
+		if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
 			break;
-		if (i) {
-			if (!trace_seq_puts(s, " <= "))
-				goto partial;
+		if (!trace_seq_puts(s, " => "))
+			goto partial;
 
 		if (!seq_print_ip_sym(s, field->caller[i], flags))
 			goto partial;
-		}
 		if (!trace_seq_puts(s, "\n"))
 			goto partial;
 	}
@@ -1012,10 +1013,10 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
 
 	trace_assign_type(field, iter->ent);
 
-	if (!seq_print_userip_objs(field, s, flags))
+	if (!trace_seq_puts(s, "<user stack trace>\n"))
 		goto partial;
 
-	if (!trace_seq_putc(s, '\n'))
+	if (!seq_print_userip_objs(field, s, flags))
 		goto partial;
 
 	return TRACE_TYPE_HANDLED;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 1796f00524e1..2d7aebd71dbd 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -265,7 +265,7 @@ static int t_show(struct seq_file *m, void *v)
 	seq_printf(m, " Depth Size Location"
 		   " (%d entries)\n"
 		   " ----- ---- --------\n",
-		   max_stack_trace.nr_entries);
+		   max_stack_trace.nr_entries - 1);
 
 	if (!stack_tracer_enabled && !max_stack_size)
 		print_disabled(m);
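The stack_trace header now reports max_stack_trace.nr_entries - 1, most likely because the saved trace ends with a ULONG_MAX sentinel entry, the same sentinel the trace_stack_print() change above now skips. A minimal sketch of that convention, with made-up addresses and counts:

	#include <limits.h>
	#include <stdio.h>

	#define MAX_ENTRIES 8

	int main(void)
	{
		/* A captured trace: return addresses, then a ULONG_MAX sentinel. */
		unsigned long caller[MAX_ENTRIES] = {
			0xc0123456UL, 0xc0abcdefUL, ULONG_MAX,
		};
		unsigned long nr_entries = 3;	/* includes the sentinel */
		unsigned long i;

		/* What the header reports after this change: entries minus the sentinel. */
		printf("(%lu entries)\n", nr_entries - 1);

		for (i = 0; i < MAX_ENTRIES; i++) {
			if (!caller[i] || caller[i] == ULONG_MAX)
				break;
			printf(" => %#lx\n", caller[i]);
		}
		return 0;
	}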