author    Frederic Weisbecker <fweisbec@gmail.com>    2009-07-29 12:00:29 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>    2009-08-06 01:28:06 -0400
commit    82e04af498a85ba425efe77580b7ba08234411df (patch)
tree      3683fb5e68cd96b518eb72d76b608205613feb24
parent    c0a0d0d3f65284c71115a9bb1ed801ee33eeb552 (diff)
tracing: Move sched event insertion helpers to the sched switch tracer file
The sched event helpers which insert the sched switch and wakeup events
into the ring buffer currently reside in trace.c. But this file is quite
overloaded, and the right place for these helpers is the sched switch
tracer file. Move them to trace_sched_switch.c.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
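Since the wakeup tracer presumably keeps calling these helpers from another file, their prototypes are expected to stay declared in kernel/trace/trace.h. A minimal sketch of those declarations, copied from the signatures visible in the diff below (the header itself is not touched by this patch):

/* Declarations assumed to live in kernel/trace/trace.h (unchanged here). */
void tracing_sched_switch_trace(struct trace_array *tr,
                                struct task_struct *prev,
                                struct task_struct *next,
                                unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
                                struct task_struct *wakee,
                                struct task_struct *curr,
                                unsigned long flags, int pc);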
-rw-r--r--    kernel/trace/trace.c                  56
-rw-r--r--    kernel/trace/trace_sched_switch.c     57
2 files changed, 57 insertions, 56 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d6059a493e7f..1b73acb40e56 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1106,62 +1106,6 @@ __trace_special(void *__tr, void *__data,
 }
 
 void
-tracing_sched_switch_trace(struct trace_array *tr,
-                           struct task_struct *prev,
-                           struct task_struct *next,
-                           unsigned long flags, int pc)
-{
-        struct ftrace_event_call *call = &event_context_switch;
-        struct ring_buffer_event *event;
-        struct ctx_switch_entry *entry;
-
-        event = trace_buffer_lock_reserve(tr, TRACE_CTX,
-                                          sizeof(*entry), flags, pc);
-        if (!event)
-                return;
-        entry = ring_buffer_event_data(event);
-        entry->prev_pid = prev->pid;
-        entry->prev_prio = prev->prio;
-        entry->prev_state = prev->state;
-        entry->next_pid = next->pid;
-        entry->next_prio = next->prio;
-        entry->next_state = next->state;
-        entry->next_cpu = task_cpu(next);
-
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                trace_buffer_unlock_commit(tr, event, flags, pc);
-}
-
-void
-tracing_sched_wakeup_trace(struct trace_array *tr,
-                           struct task_struct *wakee,
-                           struct task_struct *curr,
-                           unsigned long flags, int pc)
-{
-        struct ftrace_event_call *call = &event_wakeup;
-        struct ring_buffer_event *event;
-        struct ctx_switch_entry *entry;
-
-        event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
-                                          sizeof(*entry), flags, pc);
-        if (!event)
-                return;
-        entry = ring_buffer_event_data(event);
-        entry->prev_pid = curr->pid;
-        entry->prev_prio = curr->prio;
-        entry->prev_state = curr->state;
-        entry->next_pid = wakee->pid;
-        entry->next_prio = wakee->prio;
-        entry->next_state = wakee->state;
-        entry->next_cpu = task_cpu(wakee);
-
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                ring_buffer_unlock_commit(tr->buffer, event);
-        ftrace_trace_stack(tr, flags, 6, pc);
-        ftrace_trace_userstack(tr, flags, pc);
-}
-
-void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
         struct trace_array *tr = &global_trace;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index a98106dd979c..e1285d7b5488 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -20,6 +20,34 @@ static int sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
 static int sched_stopped;
 
+
+void
+tracing_sched_switch_trace(struct trace_array *tr,
+                           struct task_struct *prev,
+                           struct task_struct *next,
+                           unsigned long flags, int pc)
+{
+        struct ftrace_event_call *call = &event_context_switch;
+        struct ring_buffer_event *event;
+        struct ctx_switch_entry *entry;
+
+        event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+                                          sizeof(*entry), flags, pc);
+        if (!event)
+                return;
+        entry = ring_buffer_event_data(event);
+        entry->prev_pid = prev->pid;
+        entry->prev_prio = prev->prio;
+        entry->prev_state = prev->state;
+        entry->next_pid = next->pid;
+        entry->next_prio = next->prio;
+        entry->next_state = next->state;
+        entry->next_cpu = task_cpu(next);
+
+        if (!filter_check_discard(call, entry, tr->buffer, event))
+                trace_buffer_unlock_commit(tr, event, flags, pc);
+}
+
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                    struct task_struct *next)
@@ -49,6 +77,35 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
         local_irq_restore(flags);
 }
 
+void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+                           struct task_struct *wakee,
+                           struct task_struct *curr,
+                           unsigned long flags, int pc)
+{
+        struct ftrace_event_call *call = &event_wakeup;
+        struct ring_buffer_event *event;
+        struct ctx_switch_entry *entry;
+
+        event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+                                          sizeof(*entry), flags, pc);
+        if (!event)
+                return;
+        entry = ring_buffer_event_data(event);
+        entry->prev_pid = curr->pid;
+        entry->prev_prio = curr->prio;
+        entry->prev_state = curr->state;
+        entry->next_pid = wakee->pid;
+        entry->next_prio = wakee->prio;
+        entry->next_state = wakee->state;
+        entry->next_cpu = task_cpu(wakee);
+
+        if (!filter_check_discard(call, entry, tr->buffer, event))
+                ring_buffer_unlock_commit(tr->buffer, event);
+        ftrace_trace_stack(tr, flags, 6, pc);
+        ftrace_trace_userstack(tr, flags, pc);
+}
+
 static void
 probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
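For context, a minimal sketch of how the sched switch probe in this file is expected to invoke the moved helper. This is not part of the patch; ctx_trace, the enable checks and the irq handling are assumptions inferred from the surrounding hunk context:

/* Sketch only: approximate caller of the helper that now lives in this file. */
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                   struct task_struct *next)
{
        unsigned long flags;
        int pc = preempt_count();

        /* Assumed gating on the tracer state, mirroring sched_stopped above. */
        if (!tracer_enabled || sched_stopped)
                return;

        local_irq_save(flags);
        /* Record the context switch through the moved helper. */
        tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
        local_irq_restore(flags);
}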