diff options
-rw-r--r--  kernel/trace/trace.c               | 56 ----
-rw-r--r--  kernel/trace/trace_sched_switch.c  | 57 ++++
2 files changed, 57 insertions(+), 56 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d6059a493e7f..1b73acb40e56 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1106,62 +1106,6 @@ __trace_special(void *__tr, void *__data, | |||
1106 | } | 1106 | } |
1107 | 1107 | ||
1108 | void | 1108 | void |
/*
 * tracing_sched_switch_trace - record a context-switch (TRACE_CTX) event
 * @tr:    trace array whose ring buffer receives the event
 * @prev:  task being switched out
 * @next:  task being switched in
 * @flags: context flags, passed through to the buffer reserve/commit
 * @pc:    preempt count, passed through to the buffer reserve/commit
 *
 * Reserves a ctx_switch_entry in @tr's ring buffer, fills in the
 * pid/prio/state of both tasks (plus the CPU @next runs on) and commits
 * the event unless the event filter discards it.
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* reservation failed: ring buffer unavailable/full */
	entry = ring_buffer_event_data(event);
	entry->prev_pid   = prev->pid;
	entry->prev_prio  = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid   = next->pid;
	entry->next_prio  = next->prio;
	entry->next_state = next->state;
	entry->next_cpu   = task_cpu(next);

	/* Commit only if the event filter does not discard the entry. */
	if (!filter_check_discard(call, entry, tr->buffer, event))
		trace_buffer_unlock_commit(tr, event, flags, pc);
}
1134 | |||
/*
 * tracing_sched_wakeup_trace - record a wakeup (TRACE_WAKE) event
 * @tr:    trace array whose ring buffer receives the event
 * @wakee: task being woken up (stored in the entry's next_* fields)
 * @curr:  task doing the waking (stored in the entry's prev_* fields)
 * @flags: context flags, passed through to the buffer reserve
 * @pc:    preempt count, passed through to the buffer reserve
 *
 * Reuses struct ctx_switch_entry: @curr fills the prev_* slots and
 * @wakee the next_* slots. Unlike tracing_sched_switch_trace(), the
 * event is committed with ring_buffer_unlock_commit() and the stack and
 * userstack traces are emitted explicitly afterwards.
 */
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* reservation failed: ring buffer unavailable/full */
	entry = ring_buffer_event_data(event);
	entry->prev_pid   = curr->pid;
	entry->prev_prio  = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid   = wakee->pid;
	entry->next_prio  = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu   = task_cpu(wakee);

	/* Commit only if the event filter does not discard the entry. */
	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);
	/*
	 * NOTE(review): the stack/userstack traces run even when the event
	 * was discarded above — matches the original code's ordering.
	 */
	ftrace_trace_stack(tr, flags, 6, pc);
	ftrace_trace_userstack(tr, flags, pc);
}
1163 | |||
1164 | void | ||
1165 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | 1109 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) |
1166 | { | 1110 | { |
1167 | struct trace_array *tr = &global_trace; | 1111 | struct trace_array *tr = &global_trace; |
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index a98106dd979c..e1285d7b5488 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -20,6 +20,34 @@ static int sched_ref; | |||
20 | static DEFINE_MUTEX(sched_register_mutex); | 20 | static DEFINE_MUTEX(sched_register_mutex); |
21 | static int sched_stopped; | 21 | static int sched_stopped; |
22 | 22 | ||
23 | |||
/*
 * tracing_sched_switch_trace - record a context-switch (TRACE_CTX) event
 * @tr:    trace array whose ring buffer receives the event
 * @prev:  task being switched out
 * @next:  task being switched in
 * @flags: context flags, passed through to the buffer reserve/commit
 * @pc:    preempt count, passed through to the buffer reserve/commit
 *
 * Reserves a ctx_switch_entry in @tr's ring buffer, fills in the
 * pid/prio/state of both tasks (plus the CPU @next runs on) and commits
 * the event unless the event filter discards it.
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* reservation failed: ring buffer unavailable/full */
	entry = ring_buffer_event_data(event);
	entry->prev_pid   = prev->pid;
	entry->prev_prio  = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid   = next->pid;
	entry->next_prio  = next->prio;
	entry->next_state = next->state;
	entry->next_cpu   = task_cpu(next);

	/* Commit only if the event filter does not discard the entry. */
	if (!filter_check_discard(call, entry, tr->buffer, event))
		trace_buffer_unlock_commit(tr, event, flags, pc);
}
50 | |||
23 | static void | 51 | static void |
24 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, | 52 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, |
25 | struct task_struct *next) | 53 | struct task_struct *next) |
@@ -49,6 +77,35 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, | |||
49 | local_irq_restore(flags); | 77 | local_irq_restore(flags); |
50 | } | 78 | } |
51 | 79 | ||
/*
 * tracing_sched_wakeup_trace - record a wakeup (TRACE_WAKE) event
 * @tr:    trace array whose ring buffer receives the event
 * @wakee: task being woken up (stored in the entry's next_* fields)
 * @curr:  task doing the waking (stored in the entry's prev_* fields)
 * @flags: context flags, passed through to the buffer reserve
 * @pc:    preempt count, passed through to the buffer reserve
 *
 * Reuses struct ctx_switch_entry: @curr fills the prev_* slots and
 * @wakee the next_* slots. Unlike tracing_sched_switch_trace(), the
 * event is committed with ring_buffer_unlock_commit() and the stack and
 * userstack traces are emitted explicitly afterwards.
 */
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* reservation failed: ring buffer unavailable/full */
	entry = ring_buffer_event_data(event);
	entry->prev_pid   = curr->pid;
	entry->prev_prio  = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid   = wakee->pid;
	entry->next_prio  = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu   = task_cpu(wakee);

	/* Commit only if the event filter does not discard the entry. */
	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);
	/*
	 * NOTE(review): the stack/userstack traces run even when the event
	 * was discarded above — matches the original code's ordering.
	 */
	ftrace_trace_stack(tr, flags, 6, pc);
	ftrace_trace_userstack(tr, flags, pc);
}
108 | |||
52 | static void | 109 | static void |
53 | probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) | 110 | probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) |
54 | { | 111 | { |