diff options
author | Steven Rostedt <rostedt@goodmis.org> | 2008-09-29 23:02:42 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-14 04:38:59 -0400 |
commit | 777e208d40d0953efc6fb4ab58590da3f7d8f02d (patch) | |
tree | 1e5940ccafd26c958b358f7ce85926659f12c37d /kernel/trace | |
parent | 3928a8a2d98081d1bc3c0a84a2d70e29b90ecf1c (diff) |
ftrace: take advantage of variable length entries
Now that the underlying ring buffer for ftrace holds variable length
entries, we can take advantage of this by only storing the size of the
actual event into the buffer. This happens to increase the number of
entries in the buffer dramatically.
We can also get rid of the "trace_cont" operation, but I'm keeping that
until we have no more users. Some of the ftrace tracers can now change
their code to adapt to this new feature.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/trace.c | 439 | ||||
-rw-r--r-- | kernel/trace/trace.h | 81 | ||||
-rw-r--r-- | kernel/trace/trace_boot.c | 13 | ||||
-rw-r--r-- | kernel/trace/trace_mmiotrace.c | 31 |
4 files changed, 301 insertions, 263 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ef80793858b8..ed9e47c18810 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -637,9 +637,9 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) | |||
637 | 637 | ||
638 | pc = preempt_count(); | 638 | pc = preempt_count(); |
639 | 639 | ||
640 | entry->field.preempt_count = pc & 0xff; | 640 | entry->preempt_count = pc & 0xff; |
641 | entry->field.pid = (tsk) ? tsk->pid : 0; | 641 | entry->pid = (tsk) ? tsk->pid : 0; |
642 | entry->field.flags = | 642 | entry->flags = |
643 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 643 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
644 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | | 644 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | |
645 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | 645 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | |
@@ -651,7 +651,7 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, | |||
651 | unsigned long ip, unsigned long parent_ip, unsigned long flags) | 651 | unsigned long ip, unsigned long parent_ip, unsigned long flags) |
652 | { | 652 | { |
653 | struct ring_buffer_event *event; | 653 | struct ring_buffer_event *event; |
654 | struct trace_entry *entry; | 654 | struct ftrace_entry *entry; |
655 | unsigned long irq_flags; | 655 | unsigned long irq_flags; |
656 | 656 | ||
657 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 657 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
@@ -659,10 +659,10 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, | |||
659 | if (!event) | 659 | if (!event) |
660 | return; | 660 | return; |
661 | entry = ring_buffer_event_data(event); | 661 | entry = ring_buffer_event_data(event); |
662 | tracing_generic_entry_update(entry, flags); | 662 | tracing_generic_entry_update(&entry->ent, flags); |
663 | entry->type = TRACE_FN; | 663 | entry->ent.type = TRACE_FN; |
664 | entry->field.fn.ip = ip; | 664 | entry->ip = ip; |
665 | entry->field.fn.parent_ip = parent_ip; | 665 | entry->parent_ip = parent_ip; |
666 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 666 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
667 | } | 667 | } |
668 | 668 | ||
@@ -680,7 +680,7 @@ void __trace_stack(struct trace_array *tr, | |||
680 | int skip) | 680 | int skip) |
681 | { | 681 | { |
682 | struct ring_buffer_event *event; | 682 | struct ring_buffer_event *event; |
683 | struct trace_entry *entry; | 683 | struct stack_entry *entry; |
684 | struct stack_trace trace; | 684 | struct stack_trace trace; |
685 | unsigned long irq_flags; | 685 | unsigned long irq_flags; |
686 | 686 | ||
@@ -692,15 +692,15 @@ void __trace_stack(struct trace_array *tr, | |||
692 | if (!event) | 692 | if (!event) |
693 | return; | 693 | return; |
694 | entry = ring_buffer_event_data(event); | 694 | entry = ring_buffer_event_data(event); |
695 | tracing_generic_entry_update(entry, flags); | 695 | tracing_generic_entry_update(&entry->ent, flags); |
696 | entry->type = TRACE_STACK; | 696 | entry->ent.type = TRACE_STACK; |
697 | 697 | ||
698 | memset(&entry->field.stack, 0, sizeof(entry->field.stack)); | 698 | memset(&entry->caller, 0, sizeof(entry->caller)); |
699 | 699 | ||
700 | trace.nr_entries = 0; | 700 | trace.nr_entries = 0; |
701 | trace.max_entries = FTRACE_STACK_ENTRIES; | 701 | trace.max_entries = FTRACE_STACK_ENTRIES; |
702 | trace.skip = skip; | 702 | trace.skip = skip; |
703 | trace.entries = entry->field.stack.caller; | 703 | trace.entries = entry->caller; |
704 | 704 | ||
705 | save_stack_trace(&trace); | 705 | save_stack_trace(&trace); |
706 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 706 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
@@ -713,7 +713,7 @@ __trace_special(void *__tr, void *__data, | |||
713 | struct ring_buffer_event *event; | 713 | struct ring_buffer_event *event; |
714 | struct trace_array_cpu *data = __data; | 714 | struct trace_array_cpu *data = __data; |
715 | struct trace_array *tr = __tr; | 715 | struct trace_array *tr = __tr; |
716 | struct trace_entry *entry; | 716 | struct special_entry *entry; |
717 | unsigned long irq_flags; | 717 | unsigned long irq_flags; |
718 | 718 | ||
719 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 719 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
@@ -721,11 +721,11 @@ __trace_special(void *__tr, void *__data, | |||
721 | if (!event) | 721 | if (!event) |
722 | return; | 722 | return; |
723 | entry = ring_buffer_event_data(event); | 723 | entry = ring_buffer_event_data(event); |
724 | tracing_generic_entry_update(entry, 0); | 724 | tracing_generic_entry_update(&entry->ent, 0); |
725 | entry->type = TRACE_SPECIAL; | 725 | entry->ent.type = TRACE_SPECIAL; |
726 | entry->field.special.arg1 = arg1; | 726 | entry->arg1 = arg1; |
727 | entry->field.special.arg2 = arg2; | 727 | entry->arg2 = arg2; |
728 | entry->field.special.arg3 = arg3; | 728 | entry->arg3 = arg3; |
729 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 729 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
730 | __trace_stack(tr, data, irq_flags, 4); | 730 | __trace_stack(tr, data, irq_flags, 4); |
731 | 731 | ||
@@ -740,7 +740,7 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
740 | unsigned long flags) | 740 | unsigned long flags) |
741 | { | 741 | { |
742 | struct ring_buffer_event *event; | 742 | struct ring_buffer_event *event; |
743 | struct trace_entry *entry; | 743 | struct ctx_switch_entry *entry; |
744 | unsigned long irq_flags; | 744 | unsigned long irq_flags; |
745 | 745 | ||
746 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 746 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
@@ -748,15 +748,15 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
748 | if (!event) | 748 | if (!event) |
749 | return; | 749 | return; |
750 | entry = ring_buffer_event_data(event); | 750 | entry = ring_buffer_event_data(event); |
751 | tracing_generic_entry_update(entry, flags); | 751 | tracing_generic_entry_update(&entry->ent, flags); |
752 | entry->type = TRACE_CTX; | 752 | entry->ent.type = TRACE_CTX; |
753 | entry->field.ctx.prev_pid = prev->pid; | 753 | entry->prev_pid = prev->pid; |
754 | entry->field.ctx.prev_prio = prev->prio; | 754 | entry->prev_prio = prev->prio; |
755 | entry->field.ctx.prev_state = prev->state; | 755 | entry->prev_state = prev->state; |
756 | entry->field.ctx.next_pid = next->pid; | 756 | entry->next_pid = next->pid; |
757 | entry->field.ctx.next_prio = next->prio; | 757 | entry->next_prio = next->prio; |
758 | entry->field.ctx.next_state = next->state; | 758 | entry->next_state = next->state; |
759 | entry->field.ctx.next_cpu = task_cpu(next); | 759 | entry->next_cpu = task_cpu(next); |
760 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 760 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
761 | __trace_stack(tr, data, flags, 5); | 761 | __trace_stack(tr, data, flags, 5); |
762 | } | 762 | } |
@@ -769,7 +769,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
769 | unsigned long flags) | 769 | unsigned long flags) |
770 | { | 770 | { |
771 | struct ring_buffer_event *event; | 771 | struct ring_buffer_event *event; |
772 | struct trace_entry *entry; | 772 | struct ctx_switch_entry *entry; |
773 | unsigned long irq_flags; | 773 | unsigned long irq_flags; |
774 | 774 | ||
775 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 775 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
@@ -777,15 +777,15 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
777 | if (!event) | 777 | if (!event) |
778 | return; | 778 | return; |
779 | entry = ring_buffer_event_data(event); | 779 | entry = ring_buffer_event_data(event); |
780 | tracing_generic_entry_update(entry, flags); | 780 | tracing_generic_entry_update(&entry->ent, flags); |
781 | entry->type = TRACE_WAKE; | 781 | entry->ent.type = TRACE_WAKE; |
782 | entry->field.ctx.prev_pid = curr->pid; | 782 | entry->prev_pid = curr->pid; |
783 | entry->field.ctx.prev_prio = curr->prio; | 783 | entry->prev_prio = curr->prio; |
784 | entry->field.ctx.prev_state = curr->state; | 784 | entry->prev_state = curr->state; |
785 | entry->field.ctx.next_pid = wakee->pid; | 785 | entry->next_pid = wakee->pid; |
786 | entry->field.ctx.next_prio = wakee->prio; | 786 | entry->next_prio = wakee->prio; |
787 | entry->field.ctx.next_state = wakee->state; | 787 | entry->next_state = wakee->state; |
788 | entry->field.ctx.next_cpu = task_cpu(wakee); | 788 | entry->next_cpu = task_cpu(wakee); |
789 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 789 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
790 | __trace_stack(tr, data, flags, 6); | 790 | __trace_stack(tr, data, flags, 6); |
791 | 791 | ||
@@ -1173,20 +1173,19 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
1173 | static void | 1173 | static void |
1174 | lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | 1174 | lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) |
1175 | { | 1175 | { |
1176 | struct trace_field *field = &entry->field; | ||
1177 | int hardirq, softirq; | 1176 | int hardirq, softirq; |
1178 | char *comm; | 1177 | char *comm; |
1179 | 1178 | ||
1180 | comm = trace_find_cmdline(field->pid); | 1179 | comm = trace_find_cmdline(entry->pid); |
1181 | 1180 | ||
1182 | trace_seq_printf(s, "%8.8s-%-5d ", comm, field->pid); | 1181 | trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); |
1183 | trace_seq_printf(s, "%3d", cpu); | 1182 | trace_seq_printf(s, "%3d", cpu); |
1184 | trace_seq_printf(s, "%c%c", | 1183 | trace_seq_printf(s, "%c%c", |
1185 | (field->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', | 1184 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', |
1186 | ((field->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); | 1185 | ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); |
1187 | 1186 | ||
1188 | hardirq = field->flags & TRACE_FLAG_HARDIRQ; | 1187 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; |
1189 | softirq = field->flags & TRACE_FLAG_SOFTIRQ; | 1188 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; |
1190 | if (hardirq && softirq) { | 1189 | if (hardirq && softirq) { |
1191 | trace_seq_putc(s, 'H'); | 1190 | trace_seq_putc(s, 'H'); |
1192 | } else { | 1191 | } else { |
@@ -1200,8 +1199,8 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | |||
1200 | } | 1199 | } |
1201 | } | 1200 | } |
1202 | 1201 | ||
1203 | if (field->preempt_count) | 1202 | if (entry->preempt_count) |
1204 | trace_seq_printf(s, "%x", field->preempt_count); | 1203 | trace_seq_printf(s, "%x", entry->preempt_count); |
1205 | else | 1204 | else |
1206 | trace_seq_puts(s, "."); | 1205 | trace_seq_puts(s, "."); |
1207 | } | 1206 | } |
@@ -1230,6 +1229,7 @@ static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | |||
1230 | void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) | 1229 | void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) |
1231 | { | 1230 | { |
1232 | struct trace_entry *ent; | 1231 | struct trace_entry *ent; |
1232 | struct trace_field_cont *cont; | ||
1233 | bool ok = true; | 1233 | bool ok = true; |
1234 | 1234 | ||
1235 | ent = peek_next_entry(iter, iter->cpu, NULL); | 1235 | ent = peek_next_entry(iter, iter->cpu, NULL); |
@@ -1239,8 +1239,9 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) | |||
1239 | } | 1239 | } |
1240 | 1240 | ||
1241 | do { | 1241 | do { |
1242 | cont = (struct trace_field_cont *)ent; | ||
1242 | if (ok) | 1243 | if (ok) |
1243 | ok = (trace_seq_printf(s, "%s", ent->cont.buf) > 0); | 1244 | ok = (trace_seq_printf(s, "%s", cont->buf) > 0); |
1244 | ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | 1245 | ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); |
1245 | ent = peek_next_entry(iter, iter->cpu, NULL); | 1246 | ent = peek_next_entry(iter, iter->cpu, NULL); |
1246 | } while (ent && ent->type == TRACE_CONT); | 1247 | } while (ent && ent->type == TRACE_CONT); |
@@ -1257,7 +1258,6 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1257 | struct trace_entry *next_entry; | 1258 | struct trace_entry *next_entry; |
1258 | unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); | 1259 | unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); |
1259 | struct trace_entry *entry = iter->ent; | 1260 | struct trace_entry *entry = iter->ent; |
1260 | struct trace_field *field = &entry->field; | ||
1261 | unsigned long abs_usecs; | 1261 | unsigned long abs_usecs; |
1262 | unsigned long rel_usecs; | 1262 | unsigned long rel_usecs; |
1263 | u64 next_ts; | 1263 | u64 next_ts; |
@@ -1276,12 +1276,12 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1276 | abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); | 1276 | abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); |
1277 | 1277 | ||
1278 | if (verbose) { | 1278 | if (verbose) { |
1279 | comm = trace_find_cmdline(field->pid); | 1279 | comm = trace_find_cmdline(entry->pid); |
1280 | trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]" | 1280 | trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]" |
1281 | " %ld.%03ldms (+%ld.%03ldms): ", | 1281 | " %ld.%03ldms (+%ld.%03ldms): ", |
1282 | comm, | 1282 | comm, |
1283 | field->pid, cpu, field->flags, | 1283 | entry->pid, cpu, entry->flags, |
1284 | field->preempt_count, trace_idx, | 1284 | entry->preempt_count, trace_idx, |
1285 | ns2usecs(iter->ts), | 1285 | ns2usecs(iter->ts), |
1286 | abs_usecs/1000, | 1286 | abs_usecs/1000, |
1287 | abs_usecs % 1000, rel_usecs/1000, | 1287 | abs_usecs % 1000, rel_usecs/1000, |
@@ -1291,53 +1291,69 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1291 | lat_print_timestamp(s, abs_usecs, rel_usecs); | 1291 | lat_print_timestamp(s, abs_usecs, rel_usecs); |
1292 | } | 1292 | } |
1293 | switch (entry->type) { | 1293 | switch (entry->type) { |
1294 | case TRACE_FN: | 1294 | case TRACE_FN: { |
1295 | seq_print_ip_sym(s, field->fn.ip, sym_flags); | 1295 | struct ftrace_entry *field = (struct ftrace_entry *)entry; |
1296 | |||
1297 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
1296 | trace_seq_puts(s, " ("); | 1298 | trace_seq_puts(s, " ("); |
1297 | if (kretprobed(field->fn.parent_ip)) | 1299 | if (kretprobed(field->parent_ip)) |
1298 | trace_seq_puts(s, KRETPROBE_MSG); | 1300 | trace_seq_puts(s, KRETPROBE_MSG); |
1299 | else | 1301 | else |
1300 | seq_print_ip_sym(s, field->fn.parent_ip, sym_flags); | 1302 | seq_print_ip_sym(s, field->parent_ip, sym_flags); |
1301 | trace_seq_puts(s, ")\n"); | 1303 | trace_seq_puts(s, ")\n"); |
1302 | break; | 1304 | break; |
1305 | } | ||
1303 | case TRACE_CTX: | 1306 | case TRACE_CTX: |
1304 | case TRACE_WAKE: | 1307 | case TRACE_WAKE: { |
1305 | T = field->ctx.next_state < sizeof(state_to_char) ? | 1308 | struct ctx_switch_entry *field = |
1306 | state_to_char[field->ctx.next_state] : 'X'; | 1309 | (struct ctx_switch_entry *)entry; |
1310 | |||
1311 | T = field->next_state < sizeof(state_to_char) ? | ||
1312 | state_to_char[field->next_state] : 'X'; | ||
1307 | 1313 | ||
1308 | state = field->ctx.prev_state ? | 1314 | state = field->prev_state ? |
1309 | __ffs(field->ctx.prev_state) + 1 : 0; | 1315 | __ffs(field->prev_state) + 1 : 0; |
1310 | S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; | 1316 | S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; |
1311 | comm = trace_find_cmdline(field->ctx.next_pid); | 1317 | comm = trace_find_cmdline(field->next_pid); |
1312 | trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | 1318 | trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", |
1313 | field->ctx.prev_pid, | 1319 | field->prev_pid, |
1314 | field->ctx.prev_prio, | 1320 | field->prev_prio, |
1315 | S, entry->type == TRACE_CTX ? "==>" : " +", | 1321 | S, entry->type == TRACE_CTX ? "==>" : " +", |
1316 | field->ctx.next_cpu, | 1322 | field->next_cpu, |
1317 | field->ctx.next_pid, | 1323 | field->next_pid, |
1318 | field->ctx.next_prio, | 1324 | field->next_prio, |
1319 | T, comm); | 1325 | T, comm); |
1320 | break; | 1326 | break; |
1321 | case TRACE_SPECIAL: | 1327 | } |
1328 | case TRACE_SPECIAL: { | ||
1329 | struct special_entry *field = (struct special_entry *)entry; | ||
1330 | |||
1322 | trace_seq_printf(s, "# %ld %ld %ld\n", | 1331 | trace_seq_printf(s, "# %ld %ld %ld\n", |
1323 | field->special.arg1, | 1332 | field->arg1, |
1324 | field->special.arg2, | 1333 | field->arg2, |
1325 | field->special.arg3); | 1334 | field->arg3); |
1326 | break; | 1335 | break; |
1327 | case TRACE_STACK: | 1336 | } |
1337 | case TRACE_STACK: { | ||
1338 | struct stack_entry *field = (struct stack_entry *)entry; | ||
1339 | |||
1328 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | 1340 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { |
1329 | if (i) | 1341 | if (i) |
1330 | trace_seq_puts(s, " <= "); | 1342 | trace_seq_puts(s, " <= "); |
1331 | seq_print_ip_sym(s, field->stack.caller[i], sym_flags); | 1343 | seq_print_ip_sym(s, field->caller[i], sym_flags); |
1332 | } | 1344 | } |
1333 | trace_seq_puts(s, "\n"); | 1345 | trace_seq_puts(s, "\n"); |
1334 | break; | 1346 | break; |
1335 | case TRACE_PRINT: | 1347 | } |
1336 | seq_print_ip_sym(s, field->print.ip, sym_flags); | 1348 | case TRACE_PRINT: { |
1337 | trace_seq_printf(s, ": %s", field->print.buf); | 1349 | struct print_entry *field = (struct print_entry *)entry; |
1338 | if (field->flags & TRACE_FLAG_CONT) | 1350 | |
1351 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
1352 | trace_seq_printf(s, ": %s", field->buf); | ||
1353 | if (entry->flags & TRACE_FLAG_CONT) | ||
1339 | trace_seq_print_cont(s, iter); | 1354 | trace_seq_print_cont(s, iter); |
1340 | break; | 1355 | break; |
1356 | } | ||
1341 | default: | 1357 | default: |
1342 | trace_seq_printf(s, "Unknown type %d\n", entry->type); | 1358 | trace_seq_printf(s, "Unknown type %d\n", entry->type); |
1343 | } | 1359 | } |
@@ -1349,7 +1365,6 @@ static int print_trace_fmt(struct trace_iterator *iter) | |||
1349 | struct trace_seq *s = &iter->seq; | 1365 | struct trace_seq *s = &iter->seq; |
1350 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1366 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
1351 | struct trace_entry *entry; | 1367 | struct trace_entry *entry; |
1352 | struct trace_field *field; | ||
1353 | unsigned long usec_rem; | 1368 | unsigned long usec_rem; |
1354 | unsigned long long t; | 1369 | unsigned long long t; |
1355 | unsigned long secs; | 1370 | unsigned long secs; |
@@ -1363,15 +1378,13 @@ static int print_trace_fmt(struct trace_iterator *iter) | |||
1363 | if (entry->type == TRACE_CONT) | 1378 | if (entry->type == TRACE_CONT) |
1364 | return 1; | 1379 | return 1; |
1365 | 1380 | ||
1366 | field = &entry->field; | 1381 | comm = trace_find_cmdline(iter->ent->pid); |
1367 | |||
1368 | comm = trace_find_cmdline(iter->ent->field.pid); | ||
1369 | 1382 | ||
1370 | t = ns2usecs(iter->ts); | 1383 | t = ns2usecs(iter->ts); |
1371 | usec_rem = do_div(t, 1000000ULL); | 1384 | usec_rem = do_div(t, 1000000ULL); |
1372 | secs = (unsigned long)t; | 1385 | secs = (unsigned long)t; |
1373 | 1386 | ||
1374 | ret = trace_seq_printf(s, "%16s-%-5d ", comm, field->pid); | 1387 | ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); |
1375 | if (!ret) | 1388 | if (!ret) |
1376 | return 0; | 1389 | return 0; |
1377 | ret = trace_seq_printf(s, "[%03d] ", iter->cpu); | 1390 | ret = trace_seq_printf(s, "[%03d] ", iter->cpu); |
@@ -1382,20 +1395,22 @@ static int print_trace_fmt(struct trace_iterator *iter) | |||
1382 | return 0; | 1395 | return 0; |
1383 | 1396 | ||
1384 | switch (entry->type) { | 1397 | switch (entry->type) { |
1385 | case TRACE_FN: | 1398 | case TRACE_FN: { |
1386 | ret = seq_print_ip_sym(s, field->fn.ip, sym_flags); | 1399 | struct ftrace_entry *field = (struct ftrace_entry *)entry; |
1400 | |||
1401 | ret = seq_print_ip_sym(s, field->ip, sym_flags); | ||
1387 | if (!ret) | 1402 | if (!ret) |
1388 | return 0; | 1403 | return 0; |
1389 | if ((sym_flags & TRACE_ITER_PRINT_PARENT) && | 1404 | if ((sym_flags & TRACE_ITER_PRINT_PARENT) && |
1390 | field->fn.parent_ip) { | 1405 | field->parent_ip) { |
1391 | ret = trace_seq_printf(s, " <-"); | 1406 | ret = trace_seq_printf(s, " <-"); |
1392 | if (!ret) | 1407 | if (!ret) |
1393 | return 0; | 1408 | return 0; |
1394 | if (kretprobed(field->fn.parent_ip)) | 1409 | if (kretprobed(field->parent_ip)) |
1395 | ret = trace_seq_puts(s, KRETPROBE_MSG); | 1410 | ret = trace_seq_puts(s, KRETPROBE_MSG); |
1396 | else | 1411 | else |
1397 | ret = seq_print_ip_sym(s, | 1412 | ret = seq_print_ip_sym(s, |
1398 | field->fn.parent_ip, | 1413 | field->parent_ip, |
1399 | sym_flags); | 1414 | sym_flags); |
1400 | if (!ret) | 1415 | if (!ret) |
1401 | return 0; | 1416 | return 0; |
@@ -1404,40 +1419,50 @@ static int print_trace_fmt(struct trace_iterator *iter) | |||
1404 | if (!ret) | 1419 | if (!ret) |
1405 | return 0; | 1420 | return 0; |
1406 | break; | 1421 | break; |
1422 | } | ||
1407 | case TRACE_CTX: | 1423 | case TRACE_CTX: |
1408 | case TRACE_WAKE: | 1424 | case TRACE_WAKE: { |
1409 | S = field->ctx.prev_state < sizeof(state_to_char) ? | 1425 | struct ctx_switch_entry *field = |
1410 | state_to_char[field->ctx.prev_state] : 'X'; | 1426 | (struct ctx_switch_entry *)entry; |
1411 | T = field->ctx.next_state < sizeof(state_to_char) ? | 1427 | |
1412 | state_to_char[field->ctx.next_state] : 'X'; | 1428 | S = field->prev_state < sizeof(state_to_char) ? |
1429 | state_to_char[field->prev_state] : 'X'; | ||
1430 | T = field->next_state < sizeof(state_to_char) ? | ||
1431 | state_to_char[field->next_state] : 'X'; | ||
1413 | ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", | 1432 | ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", |
1414 | field->ctx.prev_pid, | 1433 | field->prev_pid, |
1415 | field->ctx.prev_prio, | 1434 | field->prev_prio, |
1416 | S, | 1435 | S, |
1417 | entry->type == TRACE_CTX ? "==>" : " +", | 1436 | entry->type == TRACE_CTX ? "==>" : " +", |
1418 | field->ctx.next_cpu, | 1437 | field->next_cpu, |
1419 | field->ctx.next_pid, | 1438 | field->next_pid, |
1420 | field->ctx.next_prio, | 1439 | field->next_prio, |
1421 | T); | 1440 | T); |
1422 | if (!ret) | 1441 | if (!ret) |
1423 | return 0; | 1442 | return 0; |
1424 | break; | 1443 | break; |
1425 | case TRACE_SPECIAL: | 1444 | } |
1445 | case TRACE_SPECIAL: { | ||
1446 | struct special_entry *field = (struct special_entry *)entry; | ||
1447 | |||
1426 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | 1448 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", |
1427 | field->special.arg1, | 1449 | field->arg1, |
1428 | field->special.arg2, | 1450 | field->arg2, |
1429 | field->special.arg3); | 1451 | field->arg3); |
1430 | if (!ret) | 1452 | if (!ret) |
1431 | return 0; | 1453 | return 0; |
1432 | break; | 1454 | break; |
1433 | case TRACE_STACK: | 1455 | } |
1456 | case TRACE_STACK: { | ||
1457 | struct stack_entry *field = (struct stack_entry *)entry; | ||
1458 | |||
1434 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | 1459 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { |
1435 | if (i) { | 1460 | if (i) { |
1436 | ret = trace_seq_puts(s, " <= "); | 1461 | ret = trace_seq_puts(s, " <= "); |
1437 | if (!ret) | 1462 | if (!ret) |
1438 | return 0; | 1463 | return 0; |
1439 | } | 1464 | } |
1440 | ret = seq_print_ip_sym(s, field->stack.caller[i], | 1465 | ret = seq_print_ip_sym(s, field->caller[i], |
1441 | sym_flags); | 1466 | sym_flags); |
1442 | if (!ret) | 1467 | if (!ret) |
1443 | return 0; | 1468 | return 0; |
@@ -1446,13 +1471,17 @@ static int print_trace_fmt(struct trace_iterator *iter) | |||
1446 | if (!ret) | 1471 | if (!ret) |
1447 | return 0; | 1472 | return 0; |
1448 | break; | 1473 | break; |
1449 | case TRACE_PRINT: | 1474 | } |
1450 | seq_print_ip_sym(s, field->print.ip, sym_flags); | 1475 | case TRACE_PRINT: { |
1451 | trace_seq_printf(s, ": %s", field->print.buf); | 1476 | struct print_entry *field = (struct print_entry *)entry; |
1452 | if (field->flags & TRACE_FLAG_CONT) | 1477 | |
1478 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
1479 | trace_seq_printf(s, ": %s", field->buf); | ||
1480 | if (entry->flags & TRACE_FLAG_CONT) | ||
1453 | trace_seq_print_cont(s, iter); | 1481 | trace_seq_print_cont(s, iter); |
1454 | break; | 1482 | break; |
1455 | } | 1483 | } |
1484 | } | ||
1456 | return 1; | 1485 | return 1; |
1457 | } | 1486 | } |
1458 | 1487 | ||
@@ -1460,7 +1489,6 @@ static int print_raw_fmt(struct trace_iterator *iter) | |||
1460 | { | 1489 | { |
1461 | struct trace_seq *s = &iter->seq; | 1490 | struct trace_seq *s = &iter->seq; |
1462 | struct trace_entry *entry; | 1491 | struct trace_entry *entry; |
1463 | struct trace_field *field; | ||
1464 | int ret; | 1492 | int ret; |
1465 | int S, T; | 1493 | int S, T; |
1466 | 1494 | ||
@@ -1469,56 +1497,66 @@ static int print_raw_fmt(struct trace_iterator *iter) | |||
1469 | if (entry->type == TRACE_CONT) | 1497 | if (entry->type == TRACE_CONT) |
1470 | return 1; | 1498 | return 1; |
1471 | 1499 | ||
1472 | field = &entry->field; | ||
1473 | |||
1474 | ret = trace_seq_printf(s, "%d %d %llu ", | 1500 | ret = trace_seq_printf(s, "%d %d %llu ", |
1475 | field->pid, iter->cpu, iter->ts); | 1501 | entry->pid, iter->cpu, iter->ts); |
1476 | if (!ret) | 1502 | if (!ret) |
1477 | return 0; | 1503 | return 0; |
1478 | 1504 | ||
1479 | switch (entry->type) { | 1505 | switch (entry->type) { |
1480 | case TRACE_FN: | 1506 | case TRACE_FN: { |
1507 | struct ftrace_entry *field = (struct ftrace_entry *)entry; | ||
1508 | |||
1481 | ret = trace_seq_printf(s, "%x %x\n", | 1509 | ret = trace_seq_printf(s, "%x %x\n", |
1482 | field->fn.ip, | 1510 | field->ip, |
1483 | field->fn.parent_ip); | 1511 | field->parent_ip); |
1484 | if (!ret) | 1512 | if (!ret) |
1485 | return 0; | 1513 | return 0; |
1486 | break; | 1514 | break; |
1515 | } | ||
1487 | case TRACE_CTX: | 1516 | case TRACE_CTX: |
1488 | case TRACE_WAKE: | 1517 | case TRACE_WAKE: { |
1489 | S = field->ctx.prev_state < sizeof(state_to_char) ? | 1518 | struct ctx_switch_entry *field = |
1490 | state_to_char[field->ctx.prev_state] : 'X'; | 1519 | (struct ctx_switch_entry *)entry; |
1491 | T = field->ctx.next_state < sizeof(state_to_char) ? | 1520 | |
1492 | state_to_char[field->ctx.next_state] : 'X'; | 1521 | S = field->prev_state < sizeof(state_to_char) ? |
1522 | state_to_char[field->prev_state] : 'X'; | ||
1523 | T = field->next_state < sizeof(state_to_char) ? | ||
1524 | state_to_char[field->next_state] : 'X'; | ||
1493 | if (entry->type == TRACE_WAKE) | 1525 | if (entry->type == TRACE_WAKE) |
1494 | S = '+'; | 1526 | S = '+'; |
1495 | ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", | 1527 | ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", |
1496 | field->ctx.prev_pid, | 1528 | field->prev_pid, |
1497 | field->ctx.prev_prio, | 1529 | field->prev_prio, |
1498 | S, | 1530 | S, |
1499 | field->ctx.next_cpu, | 1531 | field->next_cpu, |
1500 | field->ctx.next_pid, | 1532 | field->next_pid, |
1501 | field->ctx.next_prio, | 1533 | field->next_prio, |
1502 | T); | 1534 | T); |
1503 | if (!ret) | 1535 | if (!ret) |
1504 | return 0; | 1536 | return 0; |
1505 | break; | 1537 | break; |
1538 | } | ||
1506 | case TRACE_SPECIAL: | 1539 | case TRACE_SPECIAL: |
1507 | case TRACE_STACK: | 1540 | case TRACE_STACK: { |
1541 | struct special_entry *field = (struct special_entry *)entry; | ||
1542 | |||
1508 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | 1543 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", |
1509 | field->special.arg1, | 1544 | field->arg1, |
1510 | field->special.arg2, | 1545 | field->arg2, |
1511 | field->special.arg3); | 1546 | field->arg3); |
1512 | if (!ret) | 1547 | if (!ret) |
1513 | return 0; | 1548 | return 0; |
1514 | break; | 1549 | break; |
1515 | case TRACE_PRINT: | 1550 | } |
1516 | trace_seq_printf(s, "# %lx %s", | 1551 | case TRACE_PRINT: { |
1517 | field->print.ip, field->print.buf); | 1552 | struct print_entry *field = (struct print_entry *)entry; |
1518 | if (field->flags & TRACE_FLAG_CONT) | 1553 | |
1554 | trace_seq_printf(s, "# %lx %s", field->ip, field->buf); | ||
1555 | if (entry->flags & TRACE_FLAG_CONT) | ||
1519 | trace_seq_print_cont(s, iter); | 1556 | trace_seq_print_cont(s, iter); |
1520 | break; | 1557 | break; |
1521 | } | 1558 | } |
1559 | } | ||
1522 | return 1; | 1560 | return 1; |
1523 | } | 1561 | } |
1524 | 1562 | ||
@@ -1539,7 +1577,6 @@ static int print_hex_fmt(struct trace_iterator *iter) | |||
1539 | struct trace_seq *s = &iter->seq; | 1577 | struct trace_seq *s = &iter->seq; |
1540 | unsigned char newline = '\n'; | 1578 | unsigned char newline = '\n'; |
1541 | struct trace_entry *entry; | 1579 | struct trace_entry *entry; |
1542 | struct trace_field *field; | ||
1543 | int S, T; | 1580 | int S, T; |
1544 | 1581 | ||
1545 | entry = iter->ent; | 1582 | entry = iter->ent; |
@@ -1547,40 +1584,48 @@ static int print_hex_fmt(struct trace_iterator *iter) | |||
1547 | if (entry->type == TRACE_CONT) | 1584 | if (entry->type == TRACE_CONT) |
1548 | return 1; | 1585 | return 1; |
1549 | 1586 | ||
1550 | field = &entry->field; | 1587 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); |
1551 | |||
1552 | SEQ_PUT_HEX_FIELD_RET(s, field->pid); | ||
1553 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | 1588 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); |
1554 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | 1589 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); |
1555 | 1590 | ||
1556 | switch (entry->type) { | 1591 | switch (entry->type) { |
1557 | case TRACE_FN: | 1592 | case TRACE_FN: { |
1558 | SEQ_PUT_HEX_FIELD_RET(s, field->fn.ip); | 1593 | struct ftrace_entry *field = (struct ftrace_entry *)entry; |
1559 | SEQ_PUT_HEX_FIELD_RET(s, field->fn.parent_ip); | 1594 | |
1595 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | ||
1596 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | ||
1560 | break; | 1597 | break; |
1598 | } | ||
1561 | case TRACE_CTX: | 1599 | case TRACE_CTX: |
1562 | case TRACE_WAKE: | 1600 | case TRACE_WAKE: { |
1563 | S = field->ctx.prev_state < sizeof(state_to_char) ? | 1601 | struct ctx_switch_entry *field = |
1564 | state_to_char[field->ctx.prev_state] : 'X'; | 1602 | (struct ctx_switch_entry *)entry; |
1565 | T = field->ctx.next_state < sizeof(state_to_char) ? | 1603 | |
1566 | state_to_char[field->ctx.next_state] : 'X'; | 1604 | S = field->prev_state < sizeof(state_to_char) ? |
1605 | state_to_char[field->prev_state] : 'X'; | ||
1606 | T = field->next_state < sizeof(state_to_char) ? | ||
1607 | state_to_char[field->next_state] : 'X'; | ||
1567 | if (entry->type == TRACE_WAKE) | 1608 | if (entry->type == TRACE_WAKE) |
1568 | S = '+'; | 1609 | S = '+'; |
1569 | SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_pid); | 1610 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); |
1570 | SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_prio); | 1611 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); |
1571 | SEQ_PUT_HEX_FIELD_RET(s, S); | 1612 | SEQ_PUT_HEX_FIELD_RET(s, S); |
1572 | SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_cpu); | 1613 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); |
1573 | SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_pid); | 1614 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); |
1574 | SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_prio); | 1615 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); |
1575 | SEQ_PUT_HEX_FIELD_RET(s, T); | 1616 | SEQ_PUT_HEX_FIELD_RET(s, T); |
1576 | break; | 1617 | break; |
1618 | } | ||
1577 | case TRACE_SPECIAL: | 1619 | case TRACE_SPECIAL: |
1578 | case TRACE_STACK: | 1620 | case TRACE_STACK: { |
1579 | SEQ_PUT_HEX_FIELD_RET(s, field->special.arg1); | 1621 | struct special_entry *field = (struct special_entry *)entry; |
1580 | SEQ_PUT_HEX_FIELD_RET(s, field->special.arg2); | 1622 | |
1581 | SEQ_PUT_HEX_FIELD_RET(s, field->special.arg3); | 1623 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); |
1624 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | ||
1625 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | ||
1582 | break; | 1626 | break; |
1583 | } | 1627 | } |
1628 | } | ||
1584 | SEQ_PUT_FIELD_RET(s, newline); | 1629 | SEQ_PUT_FIELD_RET(s, newline); |
1585 | 1630 | ||
1586 | return 1; | 1631 | return 1; |
@@ -1590,39 +1635,46 @@ static int print_bin_fmt(struct trace_iterator *iter) | |||
1590 | { | 1635 | { |
1591 | struct trace_seq *s = &iter->seq; | 1636 | struct trace_seq *s = &iter->seq; |
1592 | struct trace_entry *entry; | 1637 | struct trace_entry *entry; |
1593 | struct trace_field *field; | ||
1594 | 1638 | ||
1595 | entry = iter->ent; | 1639 | entry = iter->ent; |
1596 | 1640 | ||
1597 | if (entry->type == TRACE_CONT) | 1641 | if (entry->type == TRACE_CONT) |
1598 | return 1; | 1642 | return 1; |
1599 | 1643 | ||
1600 | field = &entry->field; | 1644 | SEQ_PUT_FIELD_RET(s, entry->pid); |
1601 | 1645 | SEQ_PUT_FIELD_RET(s, iter->cpu); | |
1602 | SEQ_PUT_FIELD_RET(s, field->pid); | ||
1603 | SEQ_PUT_FIELD_RET(s, field->cpu); | ||
1604 | SEQ_PUT_FIELD_RET(s, iter->ts); | 1646 | SEQ_PUT_FIELD_RET(s, iter->ts); |
1605 | 1647 | ||
1606 | switch (entry->type) { | 1648 | switch (entry->type) { |
1607 | case TRACE_FN: | 1649 | case TRACE_FN: { |
1608 | SEQ_PUT_FIELD_RET(s, field->fn.ip); | 1650 | struct ftrace_entry *field = (struct ftrace_entry *)entry; |
1609 | SEQ_PUT_FIELD_RET(s, field->fn.parent_ip); | 1651 | |
1652 | SEQ_PUT_FIELD_RET(s, field->ip); | ||
1653 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | ||
1610 | break; | 1654 | break; |
1611 | case TRACE_CTX: | 1655 | } |
1612 | SEQ_PUT_FIELD_RET(s, field->ctx.prev_pid); | 1656 | case TRACE_CTX: { |
1613 | SEQ_PUT_FIELD_RET(s, field->ctx.prev_prio); | 1657 | struct ctx_switch_entry *field = |
1614 | SEQ_PUT_FIELD_RET(s, field->ctx.prev_state); | 1658 | (struct ctx_switch_entry *)entry; |
1615 | SEQ_PUT_FIELD_RET(s, field->ctx.next_pid); | 1659 | |
1616 | SEQ_PUT_FIELD_RET(s, field->ctx.next_prio); | 1660 | SEQ_PUT_FIELD_RET(s, field->prev_pid); |
1617 | SEQ_PUT_FIELD_RET(s, field->ctx.next_state); | 1661 | SEQ_PUT_FIELD_RET(s, field->prev_prio); |
1662 | SEQ_PUT_FIELD_RET(s, field->prev_state); | ||
1663 | SEQ_PUT_FIELD_RET(s, field->next_pid); | ||
1664 | SEQ_PUT_FIELD_RET(s, field->next_prio); | ||
1665 | SEQ_PUT_FIELD_RET(s, field->next_state); | ||
1618 | break; | 1666 | break; |
1667 | } | ||
1619 | case TRACE_SPECIAL: | 1668 | case TRACE_SPECIAL: |
1620 | case TRACE_STACK: | 1669 | case TRACE_STACK: { |
1621 | SEQ_PUT_FIELD_RET(s, field->special.arg1); | 1670 | struct special_entry *field = (struct special_entry *)entry; |
1622 | SEQ_PUT_FIELD_RET(s, field->special.arg2); | 1671 | |
1623 | SEQ_PUT_FIELD_RET(s, field->special.arg3); | 1672 | SEQ_PUT_FIELD_RET(s, field->arg1); |
1673 | SEQ_PUT_FIELD_RET(s, field->arg2); | ||
1674 | SEQ_PUT_FIELD_RET(s, field->arg3); | ||
1624 | break; | 1675 | break; |
1625 | } | 1676 | } |
1677 | } | ||
1626 | return 1; | 1678 | return 1; |
1627 | } | 1679 | } |
1628 | 1680 | ||
@@ -2818,10 +2870,10 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
2818 | struct ring_buffer_event *event; | 2870 | struct ring_buffer_event *event; |
2819 | struct trace_array *tr = &global_trace; | 2871 | struct trace_array *tr = &global_trace; |
2820 | struct trace_array_cpu *data; | 2872 | struct trace_array_cpu *data; |
2821 | struct trace_entry *entry; | 2873 | struct print_entry *entry; |
2822 | unsigned long flags, irq_flags; | 2874 | unsigned long flags, irq_flags; |
2823 | long disabled; | 2875 | long disabled; |
2824 | int cpu, len = 0, write, written = 0; | 2876 | int cpu, len = 0, size; |
2825 | 2877 | ||
2826 | if (!tr->ctrl || tracing_disabled) | 2878 | if (!tr->ctrl || tracing_disabled) |
2827 | return 0; | 2879 | return 0; |
@@ -2840,40 +2892,19 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
2840 | len = min(len, TRACE_BUF_SIZE-1); | 2892 | len = min(len, TRACE_BUF_SIZE-1); |
2841 | trace_buf[len] = 0; | 2893 | trace_buf[len] = 0; |
2842 | 2894 | ||
2843 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 2895 | size = sizeof(*entry) + len + 1; |
2844 | &irq_flags); | 2896 | event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags); |
2845 | if (!event) | 2897 | if (!event) |
2846 | goto out_unlock; | 2898 | goto out_unlock; |
2847 | entry = ring_buffer_event_data(event); | 2899 | entry = ring_buffer_event_data(event); |
2848 | tracing_generic_entry_update(entry, flags); | 2900 | tracing_generic_entry_update(&entry->ent, flags); |
2849 | entry->type = TRACE_PRINT; | 2901 | entry->ent.type = TRACE_PRINT; |
2850 | entry->field.print.ip = ip; | 2902 | entry->ip = ip; |
2851 | 2903 | ||
2852 | write = min(len, (int)(TRACE_PRINT_BUF_SIZE-1)); | 2904 | memcpy(&entry->buf, trace_buf, len); |
2853 | 2905 | entry->buf[len] = 0; | |
2854 | memcpy(&entry->field.print.buf, trace_buf, write); | ||
2855 | entry->field.print.buf[write] = 0; | ||
2856 | written = write; | ||
2857 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 2906 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
2858 | 2907 | ||
2859 | if (written != len) | ||
2860 | entry->field.flags |= TRACE_FLAG_CONT; | ||
2861 | |||
2862 | while (written != len) { | ||
2863 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
2864 | &irq_flags); | ||
2865 | if (!event) | ||
2866 | goto out_unlock; | ||
2867 | entry = ring_buffer_event_data(event); | ||
2868 | |||
2869 | entry->type = TRACE_CONT; | ||
2870 | write = min(len - written, (int)(TRACE_CONT_BUF_SIZE-1)); | ||
2871 | memcpy(&entry->cont.buf, trace_buf+written, write); | ||
2872 | entry->cont.buf[write] = 0; | ||
2873 | written += write; | ||
2874 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
2875 | } | ||
2876 | |||
2877 | out_unlock: | 2908 | out_unlock: |
2878 | spin_unlock(&trace_buf_lock); | 2909 | spin_unlock(&trace_buf_lock); |
2879 | 2910 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f6965f775b43..e541a6b7e312 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -27,9 +27,24 @@ enum trace_type { | |||
27 | }; | 27 | }; |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * The trace entry - the most basic unit of tracing. This is what | ||
31 | * is printed in the end as a single line in the trace output, such as: | ||
32 | * | ||
33 | * bash-15816 [01] 235.197585: idle_cpu <- irq_enter | ||
34 | */ | ||
35 | struct trace_entry { | ||
36 | unsigned char type; | ||
37 | unsigned char cpu; | ||
38 | unsigned char flags; | ||
39 | unsigned char preempt_count; | ||
40 | int pid; | ||
41 | }; | ||
42 | |||
43 | /* | ||
30 | * Function trace entry - function address and parent function address: | 44 | * Function trace entry - function address and parent function address: |
31 | */ | 45 | */ |
32 | struct ftrace_entry { | 46 | struct ftrace_entry { |
47 | struct trace_entry ent; | ||
33 | unsigned long ip; | 48 | unsigned long ip; |
34 | unsigned long parent_ip; | 49 | unsigned long parent_ip; |
35 | }; | 50 | }; |
@@ -39,6 +54,7 @@ extern struct tracer boot_tracer; | |||
39 | * Context switch trace entry - which task (and prio) we switched from/to: | 54 | * Context switch trace entry - which task (and prio) we switched from/to: |
40 | */ | 55 | */ |
41 | struct ctx_switch_entry { | 56 | struct ctx_switch_entry { |
57 | struct trace_entry ent; | ||
42 | unsigned int prev_pid; | 58 | unsigned int prev_pid; |
43 | unsigned char prev_prio; | 59 | unsigned char prev_prio; |
44 | unsigned char prev_state; | 60 | unsigned char prev_state; |
@@ -52,6 +68,7 @@ struct ctx_switch_entry { | |||
52 | * Special (free-form) trace entry: | 68 | * Special (free-form) trace entry: |
53 | */ | 69 | */ |
54 | struct special_entry { | 70 | struct special_entry { |
71 | struct trace_entry ent; | ||
55 | unsigned long arg1; | 72 | unsigned long arg1; |
56 | unsigned long arg2; | 73 | unsigned long arg2; |
57 | unsigned long arg3; | 74 | unsigned long arg3; |
@@ -64,6 +81,7 @@ struct special_entry { | |||
64 | #define FTRACE_STACK_ENTRIES 8 | 81 | #define FTRACE_STACK_ENTRIES 8 |
65 | 82 | ||
66 | struct stack_entry { | 83 | struct stack_entry { |
84 | struct trace_entry ent; | ||
67 | unsigned long caller[FTRACE_STACK_ENTRIES]; | 85 | unsigned long caller[FTRACE_STACK_ENTRIES]; |
68 | }; | 86 | }; |
69 | 87 | ||
@@ -71,10 +89,34 @@ struct stack_entry { | |||
71 | * ftrace_printk entry: | 89 | * ftrace_printk entry: |
72 | */ | 90 | */ |
73 | struct print_entry { | 91 | struct print_entry { |
92 | struct trace_entry ent; | ||
74 | unsigned long ip; | 93 | unsigned long ip; |
75 | char buf[]; | 94 | char buf[]; |
76 | }; | 95 | }; |
77 | 96 | ||
97 | #define TRACE_OLD_SIZE 88 | ||
98 | |||
99 | struct trace_field_cont { | ||
100 | unsigned char type; | ||
101 | /* Temporary till we get rid of this completely */ | ||
102 | char buf[TRACE_OLD_SIZE - 1]; | ||
103 | }; | ||
104 | |||
105 | struct trace_mmiotrace_rw { | ||
106 | struct trace_entry ent; | ||
107 | struct mmiotrace_rw rw; | ||
108 | }; | ||
109 | |||
110 | struct trace_mmiotrace_map { | ||
111 | struct trace_entry ent; | ||
112 | struct mmiotrace_map map; | ||
113 | }; | ||
114 | |||
115 | struct trace_boot { | ||
116 | struct trace_entry ent; | ||
117 | struct boot_trace initcall; | ||
118 | }; | ||
119 | |||
78 | /* | 120 | /* |
79 | * trace_flag_type is an enumeration that holds different | 121 | * trace_flag_type is an enumeration that holds different |
80 | * states when a trace occurs. These are: | 122 | * states when a trace occurs. These are: |
@@ -92,46 +134,7 @@ enum trace_flag_type { | |||
92 | TRACE_FLAG_CONT = 0x10, | 134 | TRACE_FLAG_CONT = 0x10, |
93 | }; | 135 | }; |
94 | 136 | ||
95 | /* | ||
96 | * The trace field - the most basic unit of tracing. This is what | ||
97 | * is printed in the end as a single line in the trace output, such as: | ||
98 | * | ||
99 | * bash-15816 [01] 235.197585: idle_cpu <- irq_enter | ||
100 | */ | ||
101 | struct trace_field { | ||
102 | char cpu; | ||
103 | char flags; | ||
104 | char preempt_count; | ||
105 | int pid; | ||
106 | union { | ||
107 | struct ftrace_entry fn; | ||
108 | struct ctx_switch_entry ctx; | ||
109 | struct special_entry special; | ||
110 | struct stack_entry stack; | ||
111 | struct print_entry print; | ||
112 | struct mmiotrace_rw mmiorw; | ||
113 | struct mmiotrace_map mmiomap; | ||
114 | struct boot_trace initcall; | ||
115 | }; | ||
116 | }; | ||
117 | |||
118 | struct trace_field_cont { | ||
119 | char buf[sizeof(struct trace_field)]; | ||
120 | }; | ||
121 | |||
122 | struct trace_entry { | ||
123 | char type; | ||
124 | union { | ||
125 | struct trace_field field; | ||
126 | struct trace_field_cont cont; | ||
127 | }; | ||
128 | }; | ||
129 | |||
130 | #define TRACE_ENTRY_SIZE sizeof(struct trace_entry) | ||
131 | #define TRACE_BUF_SIZE 1024 | 137 | #define TRACE_BUF_SIZE 1024 |
132 | #define TRACE_PRINT_BUF_SIZE \ | ||
133 | (sizeof(struct trace_field) - offsetof(struct trace_field, print.buf)) | ||
134 | #define TRACE_CONT_BUF_SIZE sizeof(struct trace_field) | ||
135 | 138 | ||
136 | /* | 139 | /* |
137 | * The CPU trace array - it consists of thousands of trace entries | 140 | * The CPU trace array - it consists of thousands of trace entries |
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 3657eec6b87d..fa8cca1be115 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
@@ -49,10 +49,11 @@ static int initcall_print_line(struct trace_iterator *iter) | |||
49 | { | 49 | { |
50 | int ret = 0; | 50 | int ret = 0; |
51 | struct trace_entry *entry = iter->ent; | 51 | struct trace_entry *entry = iter->ent; |
52 | struct boot_trace *it = &entry->field.initcall; | 52 | struct trace_boot *field = (struct trace_boot *)entry; |
53 | struct boot_trace *it = &field->initcall; | ||
53 | struct trace_seq *s = &iter->seq; | 54 | struct trace_seq *s = &iter->seq; |
54 | 55 | ||
55 | if (iter->ent->type == TRACE_BOOT) | 56 | if (entry->type == TRACE_BOOT) |
56 | ret = trace_seq_printf(s, "%pF called from %i " | 57 | ret = trace_seq_printf(s, "%pF called from %i " |
57 | "returned %d after %lld msecs\n", | 58 | "returned %d after %lld msecs\n", |
58 | it->func, it->caller, it->result, | 59 | it->func, it->caller, it->result, |
@@ -75,7 +76,7 @@ struct tracer boot_tracer __read_mostly = | |||
75 | void trace_boot(struct boot_trace *it) | 76 | void trace_boot(struct boot_trace *it) |
76 | { | 77 | { |
77 | struct ring_buffer_event *event; | 78 | struct ring_buffer_event *event; |
78 | struct trace_entry *entry; | 79 | struct trace_boot *entry; |
79 | struct trace_array_cpu *data; | 80 | struct trace_array_cpu *data; |
80 | unsigned long irq_flags; | 81 | unsigned long irq_flags; |
81 | struct trace_array *tr = boot_trace; | 82 | struct trace_array *tr = boot_trace; |
@@ -91,9 +92,9 @@ void trace_boot(struct boot_trace *it) | |||
91 | if (!event) | 92 | if (!event) |
92 | goto out; | 93 | goto out; |
93 | entry = ring_buffer_event_data(event); | 94 | entry = ring_buffer_event_data(event); |
94 | tracing_generic_entry_update(entry, 0); | 95 | tracing_generic_entry_update(&entry->ent, 0); |
95 | entry->type = TRACE_BOOT; | 96 | entry->ent.type = TRACE_BOOT; |
96 | entry->field.initcall = *it; | 97 | entry->initcall = *it; |
97 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 98 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
98 | 99 | ||
99 | trace_wake_up(); | 100 | trace_wake_up(); |
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index bdbf09d8413c..3df441ea2749 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -178,14 +178,16 @@ print_out: | |||
178 | static int mmio_print_rw(struct trace_iterator *iter) | 178 | static int mmio_print_rw(struct trace_iterator *iter) |
179 | { | 179 | { |
180 | struct trace_entry *entry = iter->ent; | 180 | struct trace_entry *entry = iter->ent; |
181 | struct mmiotrace_rw *rw = &entry->field.mmiorw; | 181 | struct trace_mmiotrace_rw *field = |
182 | (struct trace_mmiotrace_rw *)entry; | ||
183 | struct mmiotrace_rw *rw = &field->rw; | ||
182 | struct trace_seq *s = &iter->seq; | 184 | struct trace_seq *s = &iter->seq; |
183 | unsigned long long t = ns2usecs(iter->ts); | 185 | unsigned long long t = ns2usecs(iter->ts); |
184 | unsigned long usec_rem = do_div(t, 1000000ULL); | 186 | unsigned long usec_rem = do_div(t, 1000000ULL); |
185 | unsigned secs = (unsigned long)t; | 187 | unsigned secs = (unsigned long)t; |
186 | int ret = 1; | 188 | int ret = 1; |
187 | 189 | ||
188 | switch (entry->field.mmiorw.opcode) { | 190 | switch (rw->opcode) { |
189 | case MMIO_READ: | 191 | case MMIO_READ: |
190 | ret = trace_seq_printf(s, | 192 | ret = trace_seq_printf(s, |
191 | "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", | 193 | "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
@@ -220,14 +222,14 @@ static int mmio_print_rw(struct trace_iterator *iter) | |||
220 | static int mmio_print_map(struct trace_iterator *iter) | 222 | static int mmio_print_map(struct trace_iterator *iter) |
221 | { | 223 | { |
222 | struct trace_entry *entry = iter->ent; | 224 | struct trace_entry *entry = iter->ent; |
223 | struct mmiotrace_map *m = &entry->field.mmiomap; | 225 | struct mmiotrace_map *m = (struct mmiotrace_map *)entry; |
224 | struct trace_seq *s = &iter->seq; | 226 | struct trace_seq *s = &iter->seq; |
225 | unsigned long long t = ns2usecs(iter->ts); | 227 | unsigned long long t = ns2usecs(iter->ts); |
226 | unsigned long usec_rem = do_div(t, 1000000ULL); | 228 | unsigned long usec_rem = do_div(t, 1000000ULL); |
227 | unsigned secs = (unsigned long)t; | 229 | unsigned secs = (unsigned long)t; |
228 | int ret = 1; | 230 | int ret = 1; |
229 | 231 | ||
230 | switch (entry->field.mmiorw.opcode) { | 232 | switch (m->opcode) { |
231 | case MMIO_PROBE: | 233 | case MMIO_PROBE: |
232 | ret = trace_seq_printf(s, | 234 | ret = trace_seq_printf(s, |
233 | "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", | 235 | "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", |
@@ -252,7 +254,8 @@ static int mmio_print_map(struct trace_iterator *iter) | |||
252 | static int mmio_print_mark(struct trace_iterator *iter) | 254 | static int mmio_print_mark(struct trace_iterator *iter) |
253 | { | 255 | { |
254 | struct trace_entry *entry = iter->ent; | 256 | struct trace_entry *entry = iter->ent; |
255 | const char *msg = entry->field.print.buf; | 257 | struct print_entry *print = (struct print_entry *)entry; |
258 | const char *msg = print->buf; | ||
256 | struct trace_seq *s = &iter->seq; | 259 | struct trace_seq *s = &iter->seq; |
257 | unsigned long long t = ns2usecs(iter->ts); | 260 | unsigned long long t = ns2usecs(iter->ts); |
258 | unsigned long usec_rem = do_div(t, 1000000ULL); | 261 | unsigned long usec_rem = do_div(t, 1000000ULL); |
@@ -264,7 +267,7 @@ static int mmio_print_mark(struct trace_iterator *iter) | |||
264 | if (!ret) | 267 | if (!ret) |
265 | return 0; | 268 | return 0; |
266 | 269 | ||
267 | if (entry->field.flags & TRACE_FLAG_CONT) | 270 | if (entry->flags & TRACE_FLAG_CONT) |
268 | trace_seq_print_cont(s, iter); | 271 | trace_seq_print_cont(s, iter); |
269 | 272 | ||
270 | return 1; | 273 | return 1; |
@@ -308,7 +311,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
308 | struct mmiotrace_rw *rw) | 311 | struct mmiotrace_rw *rw) |
309 | { | 312 | { |
310 | struct ring_buffer_event *event; | 313 | struct ring_buffer_event *event; |
311 | struct trace_entry *entry; | 314 | struct trace_mmiotrace_rw *entry; |
312 | unsigned long irq_flags; | 315 | unsigned long irq_flags; |
313 | 316 | ||
314 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 317 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
@@ -316,9 +319,9 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
316 | if (!event) | 319 | if (!event) |
317 | return; | 320 | return; |
318 | entry = ring_buffer_event_data(event); | 321 | entry = ring_buffer_event_data(event); |
319 | tracing_generic_entry_update(entry, 0); | 322 | tracing_generic_entry_update(&entry->ent, 0); |
320 | entry->type = TRACE_MMIO_RW; | 323 | entry->ent.type = TRACE_MMIO_RW; |
321 | entry->field.mmiorw = *rw; | 324 | entry->rw = *rw; |
322 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 325 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
323 | 326 | ||
324 | trace_wake_up(); | 327 | trace_wake_up(); |
@@ -336,7 +339,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
336 | struct mmiotrace_map *map) | 339 | struct mmiotrace_map *map) |
337 | { | 340 | { |
338 | struct ring_buffer_event *event; | 341 | struct ring_buffer_event *event; |
339 | struct trace_entry *entry; | 342 | struct trace_mmiotrace_map *entry; |
340 | unsigned long irq_flags; | 343 | unsigned long irq_flags; |
341 | 344 | ||
342 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 345 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
@@ -344,9 +347,9 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
344 | if (!event) | 347 | if (!event) |
345 | return; | 348 | return; |
346 | entry = ring_buffer_event_data(event); | 349 | entry = ring_buffer_event_data(event); |
347 | tracing_generic_entry_update(entry, 0); | 350 | tracing_generic_entry_update(&entry->ent, 0); |
348 | entry->type = TRACE_MMIO_MAP; | 351 | entry->ent.type = TRACE_MMIO_MAP; |
349 | entry->field.mmiomap = *map; | 352 | entry->map = *map; |
350 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 353 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
351 | 354 | ||
352 | trace_wake_up(); | 355 | trace_wake_up(); |