Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
 -rw-r--r--  kernel/trace/trace_sched_switch.c | 137
 1 file changed, 31 insertions(+), 106 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index cb817a209aa..b8f56beb1a6 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -9,8 +9,8 @@
 #include <linux/debugfs.h>
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
-#include <linux/marker.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include "trace.h"
 
@@ -19,15 +19,16 @@ static int __read_mostly tracer_enabled;
 static atomic_t sched_ref;
 
 static void
-sched_switch_func(void *private, void *__rq, struct task_struct *prev,
+probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 			struct task_struct *next)
 {
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu;
+	int pc;
+
+	if (!atomic_read(&sched_ref))
+		return;
 
 	tracing_record_cmdline(prev);
 	tracing_record_cmdline(next);
@@ -35,97 +36,41 @@ sched_switch_func(void *private, void *__rq, struct task_struct *prev,
 	if (!tracer_enabled)
 		return;
 
+	pc = preempt_count();
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	data = ctx_trace->data[cpu];
 
-	if (likely(disabled == 1))
-		tracing_sched_switch_trace(tr, data, prev, next, flags);
+	if (likely(!atomic_read(&data->disabled)))
+		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-		      const char *format, va_list *args)
-{
-	struct task_struct *prev;
-	struct task_struct *next;
-	struct rq *__rq;
-
-	if (!atomic_read(&sched_ref))
-		return;
-
-	/* skip prev_pid %d next_pid %d prev_state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	__rq = va_arg(*args, typeof(__rq));
-	prev = va_arg(*args, typeof(prev));
-	next = va_arg(*args, typeof(next));
-
-	/*
-	 * If tracer_switch_func only points to the local
-	 * switch func, it still needs the ptr passed to it.
-	 */
-	sched_switch_func(probe_data, __rq, prev, next);
-}
-
 static void
-wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
-			task_struct *curr)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
-	int cpu;
+	int cpu, pc;
 
-	if (!tracer_enabled)
+	if (!likely(tracer_enabled))
 		return;
 
-	tracing_record_cmdline(curr);
+	pc = preempt_count();
+	tracing_record_cmdline(current);
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	data = ctx_trace->data[cpu];
 
-	if (likely(disabled == 1))
-		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+	if (likely(!atomic_read(&data->disabled)))
+		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+					   flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-		 const char *format, va_list *args)
-{
-	struct task_struct *curr;
-	struct task_struct *task;
-	struct rq *__rq;
-
-	if (likely(!tracer_enabled))
-		return;
-
-	/* Skip pid %d state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	/* now get the meat: "rq %p task %p rq->curr %p" */
-	__rq = va_arg(*args, typeof(__rq));
-	task = va_arg(*args, typeof(task));
-	curr = va_arg(*args, typeof(curr));
-
-	tracing_record_cmdline(task);
-	tracing_record_cmdline(curr);
-
-	wakeup_func(probe_data, __rq, task, curr);
-}
-
 static void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;
@@ -133,67 +78,47 @@ static void sched_switch_reset(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr->data[cpu]);
+		tracing_reset(tr, cpu);
 }
 
 static int tracing_sched_register(void)
 {
 	int ret;
 
-	ret = marker_probe_register("kernel_sched_wakeup",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&ctx_trace);
+	ret = register_trace_sched_wakeup(probe_sched_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
 		return ret;
 	}
 
-	ret = marker_probe_register("kernel_sched_wakeup_new",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&ctx_trace);
+	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
 		goto fail_deprobe;
 	}
 
-	ret = marker_probe_register("kernel_sched_schedule",
-		"prev_pid %d next_pid %d prev_state %ld "
-		"## rq %p prev %p next %p",
-		sched_switch_callback,
-		&ctx_trace);
+	ret = register_trace_sched_switch(probe_sched_switch);
 	if (ret) {
-		pr_info("sched trace: Couldn't add marker"
+		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
 		goto fail_deprobe_wake_new;
 	}
 
 	return ret;
 fail_deprobe_wake_new:
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&ctx_trace);
+	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
 fail_deprobe:
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&ctx_trace);
+	unregister_trace_sched_wakeup(probe_sched_wakeup);
 	return ret;
 }
 
 static void tracing_sched_unregister(void)
 {
-	marker_probe_unregister("kernel_sched_schedule",
-				sched_switch_callback,
-				&ctx_trace);
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&ctx_trace);
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&ctx_trace);
+	unregister_trace_sched_switch(probe_sched_switch);
+	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
+	unregister_trace_sched_wakeup(probe_sched_wakeup);
 }
 
 static void tracing_start_sched_switch(void)
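
Note: the patch replaces marker_probe_register()/marker_probe_unregister(), which handed probes a format string and a va_list, with the typed sched tracepoints declared in <trace/sched.h>. As a rough sketch only (not part of this commit; the module and probe names are illustrative, and it assumes a tree of this vintage where <trace/sched.h> declares sched_switch), a stand-alone module could attach to the same tracepoint like so:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <trace/sched.h>

/* Probe arguments arrive fully typed; no va_list parsing as with markers. */
static void example_sched_switch_probe(struct rq *rq,
				       struct task_struct *prev,
				       struct task_struct *next)
{
	pr_debug("switch %d -> %d\n", prev->pid, next->pid);
}

static int __init example_init(void)
{
	/* Returns 0 on success or a negative errno, as checked in the patch. */
	return register_trace_sched_switch(example_sched_switch_probe);
}

static void __exit example_exit(void)
{
	unregister_trace_sched_switch(example_sched_switch_probe);
	/* Wait for in-flight probe callers before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

register_trace_sched_switch()/unregister_trace_sched_switch() are generated by the tracepoint declaration, which is why probe_sched_switch() above receives typed arguments instead of parsing a va_list as the removed sched_switch_callback()/wake_up_callback() did.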
