author     Arnaldo Carvalho de Melo <acme@redhat.com>  2009-02-05 13:12:56 -0500
committer  Ingo Molnar <mingo@elte.hu>                 2009-02-05 19:01:40 -0500
commit     0a9877514c4fed10a70720293b37213dd172ee3e
tree       7f21548126d448d96116f57f2c7927fa2b2c7ddd  /kernel/trace/trace.c
parent     dac74940289f350c2590bec92737833bad608541
ring_buffer: remove unused flags parameter
Impact: API change, cleanup
From ring_buffer_{lock_reserve,unlock_commit}.
$ codiff /tmp/vmlinux.before /tmp/vmlinux.after
linux-2.6-tip/kernel/trace/trace.c:
trace_vprintk | -14
trace_graph_return | -14
trace_graph_entry | -10
trace_function | -8
__ftrace_trace_stack | -8
ftrace_trace_userstack | -8
tracing_sched_switch_trace | -8
ftrace_trace_special | -12
tracing_sched_wakeup_trace | -8
9 functions changed, 90 bytes removed, diff: -90
linux-2.6-tip/block/blktrace.c:
__blk_add_trace | -1
1 function changed, 1 bytes removed, diff: -1
/tmp/vmlinux.after:
10 functions changed, 91 bytes removed, diff: -91
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Frédéric Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
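
In outline, the caller-side pattern changes as follows. This is a condensed
sketch assembled from the hunks below, not a complete function; the enclosing
tracer functions supply tr, event and entry:

	/* Before: an irq_flags value was threaded through reserve and
	 * commit, even though the ring buffer never used it. */
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* ... fill in the entry ... */
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	/* After: reserve and commit take only the buffer and the event. */
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* ... fill in the entry ... */
	ring_buffer_unlock_commit(tr->buffer, event);

Callers that merely forwarded irq_flags to ftrace_trace_stack() and
ftrace_trace_userstack(), as ftrace_trace_special() did, now pass 0 instead.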
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 56
1 file changed, 20 insertions(+), 36 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3536ef41575d..eb453a238a6f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -783,14 +783,12 @@ trace_function(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
-	unsigned long irq_flags;
 
 	/* If we are reading the ring buffer, don't trace */
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -798,7 +796,7 @@ trace_function(struct trace_array *tr,
 	entry->ent.type = TRACE_FN;
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -809,20 +807,18 @@ static void __trace_graph_entry(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ent_entry *entry;
-	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
 	entry->ent.type = TRACE_GRAPH_ENT;
 	entry->graph_ent = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 
 static void __trace_graph_return(struct trace_array *tr,
@@ -832,20 +828,18 @@ static void __trace_graph_return(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *entry;
-	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
 	entry->ent.type = TRACE_GRAPH_RET;
 	entry->ret = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
 
@@ -866,10 +860,8 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -884,7 +876,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -912,13 +904,11 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 	struct stack_trace trace;
-	unsigned long irq_flags;
 
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -933,7 +923,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -950,10 +940,8 @@ ftrace_trace_special(void *__tr,
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -962,9 +950,9 @@ ftrace_trace_special(void *__tr,
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, irq_flags, 4, pc);
-	ftrace_trace_userstack(tr, irq_flags, pc);
+	ring_buffer_unlock_commit(tr->buffer, event);
+	ftrace_trace_stack(tr, 0, 4, pc);
+	ftrace_trace_userstack(tr, 0, pc);
 
 	trace_wake_up();
 }
@@ -984,10 +972,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -1000,7 +986,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_prio = next->prio;
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 5, pc);
 	ftrace_trace_userstack(tr, flags, pc);
 }
@@ -1013,10 +999,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -1029,7 +1013,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_prio = wakee->prio;
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
 
@@ -2841,7 +2825,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	trace_buf[len] = 0;
 
 	size = sizeof(*entry) + len + 1;
-	event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, size);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -2852,7 +2836,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
 	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);