path: root/kernel/trace/trace.c
author		Arnaldo Carvalho de Melo <acme@redhat.com>	2009-02-05 13:14:13 -0500
committer	Ingo Molnar <mingo@elte.hu>			2009-02-05 19:01:41 -0500
commit		51a763dd84253bab1d0a1e68e11a7753d1b702ca (patch)
tree		2cc2cf0509db480391c585786285267e360c1338 /kernel/trace/trace.c
parent		0a9877514c4fed10a70720293b37213dd172ee3e (diff)
tracing: Introduce trace_buffer_{lock_reserve,unlock_commit}
Impact: new API

These new functions do what previously was being open coded, reducing
the number of details ftrace plugin writers have to worry about.

It also standardizes the handling of stacktrace, userstacktrace and
other trace options we may introduce in the future.

With this patch, for instance, the blk tracer (and some others already
in the tree) can use the "userstacktrace" /d/tracing/trace_options
facility.

$ codiff /tmp/vmlinux.before /tmp/vmlinux.after
linux-2.6-tip/kernel/trace/trace.c:
  trace_vprintk              |   -5
  trace_graph_return         |  -22
  trace_graph_entry          |  -26
  trace_function             |  -45
  __ftrace_trace_stack       |  -27
  ftrace_trace_userstack     |  -29
  tracing_sched_switch_trace |  -66
  tracing_stop               |   +1
  trace_seq_to_user          |   -1
  ftrace_trace_special       |  -63
  ftrace_special             |   +1
  tracing_sched_wakeup_trace |  -70
  tracing_reset_online_cpus  |   -1
 13 functions changed, 2 bytes added, 355 bytes removed, diff: -353

linux-2.6-tip/block/blktrace.c:
  __blk_add_trace |  -58
 1 function changed, 58 bytes removed, diff: -58

linux-2.6-tip/kernel/trace/trace.c:
  trace_buffer_lock_reserve  |  +88
  trace_buffer_unlock_commit |  +86
 2 functions changed, 174 bytes added, diff: +174

/tmp/vmlinux.after:
 16 functions changed, 176 bytes added, 413 bytes removed, diff: -237

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Frédéric Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
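[Editor's note] For plugin writers, the patch collapses the open-coded
reserve/update/commit sequence into the new pair. A minimal before/after
sketch of the pattern, assuming a hypothetical TRACE_MYEVENT entry type and
entry struct (the helper signatures are the ones this patch introduces):

	/* Before: each plugin open-coded the entry header setup. */
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_MYEVENT;	/* hypothetical entry type */
	/* ... fill in plugin-specific fields ... */
	ring_buffer_unlock_commit(tr->buffer, event);

	/* After: the helpers fill in the entry header on reserve, and the
	 * commit side handles the stacktrace/userstacktrace options and
	 * the trace_wake_up() call for every plugin uniformly. */
	event = trace_buffer_lock_reserve(tr, TRACE_MYEVENT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* ... fill in plugin-specific fields ... */
	trace_buffer_unlock_commit(tr, event, flags, pc);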
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	94
1 file changed, 53 insertions(+), 41 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index eb453a238a6f..8fad3776e843 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -776,6 +776,39 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
 
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						     unsigned char type,
+						     unsigned long len,
+						     unsigned long flags, int pc)
+{
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_lock_reserve(tr->buffer, len);
+	if (event != NULL) {
+		struct trace_entry *ent = ring_buffer_event_data(event);
+
+		tracing_generic_entry_update(ent, flags, pc);
+		ent->type = type;
+	}
+
+	return event;
+}
+static void ftrace_trace_stack(struct trace_array *tr,
+			       unsigned long flags, int skip, int pc);
+static void ftrace_trace_userstack(struct trace_array *tr,
+				   unsigned long flags, int pc);
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc)
+{
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
+	trace_wake_up();
+}
+
 void
 trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -788,12 +821,11 @@ trace_function(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+					  flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_FN;
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 	ring_buffer_unlock_commit(tr->buffer, event);
@@ -811,12 +843,11 @@ static void __trace_graph_entry(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_GRAPH_ENT;
 	entry->graph_ent = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
@@ -832,12 +863,11 @@ static void __trace_graph_return(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_GRAPH_RET;
 	entry->ret = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
@@ -861,13 +891,11 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct stack_entry *entry;
 	struct stack_trace trace;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_STACK;
-
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
 	trace.nr_entries = 0;
@@ -908,12 +936,11 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_USER_STACK;
 
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
@@ -941,20 +968,15 @@ ftrace_trace_special(void *__tr,
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+					  sizeof(*entry), 0, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, pc);
-	entry->ent.type = TRACE_SPECIAL;
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, 0, 4, pc);
-	ftrace_trace_userstack(tr, 0, pc);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, pc);
 }
 
 void
@@ -973,12 +995,11 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_CTX;
 	entry->prev_pid = prev->pid;
 	entry->prev_prio = prev->prio;
 	entry->prev_state = prev->state;
@@ -986,9 +1007,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_prio = next->prio;
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
-	ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 5, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
@@ -1000,12 +1019,11 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_WAKE;
 	entry->prev_pid = curr->pid;
 	entry->prev_prio = curr->prio;
 	entry->prev_state = curr->state;
@@ -1013,11 +1031,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_prio = wakee->prio;
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
-	ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
@@ -2825,12 +2839,10 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	trace_buf[len] = 0;
 
 	size = sizeof(*entry) + len + 1;
-	event = ring_buffer_lock_reserve(tr->buffer, size);
+	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, irq_flags, pc);
-	entry->ent.type = TRACE_PRINT;
 	entry->ip = ip;
 	entry->depth = depth;
 