-rw-r--r-- | include/linux/tracepoint.h | 105
-rw-r--r-- | include/trace/irq_event_types.h | 8
-rw-r--r-- | include/trace/sched_event_types.h | 102
-rw-r--r-- | kernel/trace/blktrace.c | 10
-rw-r--r-- | kernel/trace/trace.c | 2
-rw-r--r-- | kernel/trace/trace_branch.c | 2
-rw-r--r-- | kernel/trace/trace_events.c | 45
-rw-r--r-- | kernel/trace/trace_events_stage_1.h | 8
-rw-r--r-- | kernel/trace/trace_events_stage_2.h | 13
-rw-r--r-- | kernel/trace/trace_events_stage_3.h | 98
-rw-r--r-- | kernel/trace/trace_export.c | 2
-rw-r--r-- | kernel/trace/trace_functions_graph.c | 6
-rw-r--r-- | kernel/trace/trace_output.c | 14
-rw-r--r-- | kernel/trace/trace_workqueue.c | 6
14 files changed, 275 insertions, 146 deletions
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 69b56988813d..d35a7ee7611f 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -157,7 +157,110 @@ static inline void tracepoint_synchronize_unregister(void) | |||
157 | #define TRACE_FORMAT(name, proto, args, fmt) \ | 157 | #define TRACE_FORMAT(name, proto, args, fmt) \ |
158 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | 158 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) |
159 | 159 | ||
160 | #define TRACE_EVENT(name, proto, args, struct, print, assign) \ | 160 | |
161 | /* | ||
162 | * For use with the TRACE_EVENT macro: | ||
163 | * | ||
164 | * We define a tracepoint, its arguments, its printk format | ||
165 | * and its 'fast binary record' layout. | ||
166 | * | ||
167 | * First, name your tracepoint via TRACE_EVENT(name): the | ||
168 | * 'subsystem_event' notation is fine. | ||
169 | * | ||
170 | * Think about this whole construct as the | ||
171 | * 'trace_sched_switch() function' from now on. | ||
172 | * | ||
173 | * | ||
174 | * TRACE_EVENT(sched_switch, | ||
175 | * | ||
176 | * * | ||
177 | * * The 'function' has a regular prototype with arguments; | ||
178 | * * declare it via TP_PROTO(): | ||
179 | * * | ||
180 | * | ||
181 | * TP_PROTO(struct rq *rq, struct task_struct *prev, | ||
182 | * struct task_struct *next), | ||
183 | * | ||
184 | * * | ||
185 | * * Define the call signature of the 'function'. | ||
186 | * * (Design sidenote: we use this instead of a | ||
187 | * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.) | ||
188 | * * | ||
189 | * | ||
190 | * TP_ARGS(rq, prev, next), | ||
191 | * | ||
192 | * * | ||
193 | * * Fast binary tracing: define the trace record via | ||
194 | * * TP_STRUCT__entry(). You can think of it as a | ||
195 | * * regular C structure local variable definition. | ||
196 | * * | ||
197 | * * This is how the trace record is structured and will | ||
198 | * * be saved into the ring buffer. These are the fields | ||
199 | * * that will be exposed to user-space in | ||
200 | * * /debug/tracing/events/<*>/format. | ||
201 | * * | ||
202 | * * The declared 'local variable' is called '__entry' | ||
203 | * * | ||
204 | * * __field(pid_t, prev_pid) is equivalent to a standard declaration: | ||
205 | * * | ||
206 | * * pid_t prev_pid; | ||
207 | * * | ||
208 | * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to: | ||
209 | * * | ||
210 | * * char prev_comm[TASK_COMM_LEN]; | ||
211 | * * | ||
212 | * | ||
213 | * TP_STRUCT__entry( | ||
214 | * __array( char, prev_comm, TASK_COMM_LEN ) | ||
215 | * __field( pid_t, prev_pid ) | ||
216 | * __field( int, prev_prio ) | ||
217 | * __array( char, next_comm, TASK_COMM_LEN ) | ||
218 | * __field( pid_t, next_pid ) | ||
219 | * __field( int, next_prio ) | ||
220 | * ), | ||
221 | * | ||
222 | * * | ||
223 | * * Assign values to the trace record, by embedding | ||
224 | * * a full C statement block into TP_fast_assign(). You | ||
225 | * * can refer to the trace record as '__entry' - | ||
226 | * * otherwise you can put arbitrary C code in here. | ||
227 | * * | ||
228 | * * Note: this C code will execute every time a trace event | ||
229 | * * happens, on an active tracepoint. | ||
230 | * * | ||
231 | * | ||
232 | * TP_fast_assign( | ||
233 | * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); | ||
234 | * __entry->prev_pid = prev->pid; | ||
235 | * __entry->prev_prio = prev->prio; | ||
236 | * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); | ||
237 | * __entry->next_pid = next->pid; | ||
238 | * __entry->next_prio = next->prio; | ||
239 | * ) | ||
240 | * | ||
241 | * * | ||
242 | * * Formatted output of a trace record via TP_printk(). | ||
243 | * * This is how the tracepoint will appear under ftrace | ||
244 | * * plugins that make use of this tracepoint. | ||
245 | * * | ||
246 | * * (raw-binary tracing won't actually perform this step.) | ||
247 | * * | ||
248 | * | ||
249 | * TP_printk("task %s:%d [%d] ==> %s:%d [%d]", | ||
250 | * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, | ||
251 | * __entry->next_comm, __entry->next_pid, __entry->next_prio), | ||
252 | * | ||
253 | * ); | ||
254 | * | ||
255 | * This macro construct is thus used to set up regular printk-format | ||
256 | * tracing, to construct a function-pointer based | ||
257 | * tracepoint callback (which is used by programmatic plugins and | ||
258 | * can also be used by generic instrumentation like SystemTap), and | ||
259 | * to expose a structured trace record in | ||
260 | * /debug/tracing/events/. | ||
261 | */ | ||
262 | |||
263 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ | ||
161 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | 264 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) |
162 | 265 | ||
163 | #endif | 266 | #endif |
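
To put the comment block above in context (this example is not part of the patch): once TRACE_EVENT(sched_switch, ...) is declared, the scheduler emits the event by calling the generated trace_sched_switch() function with the TP_PROTO()/TP_ARGS() signature. A minimal sketch of such a call site, with the surrounding scheduler code and includes elided and the wrapper function name made up for illustration:

    /*
     * Sketch only: a call site matching the TP_PROTO() documented above.
     * With no probe registered this is close to a no-op; once the event
     * is enabled, the registered probe fills a binary trace record and
     * commits it to the ring buffer (see trace_events_stage_3.h below).
     */
    static inline void emit_sched_switch_event(struct rq *rq,
                                               struct task_struct *prev,
                                               struct task_struct *next)
    {
            trace_sched_switch(rq, prev, next);
    }
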
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h
index 43bcb74dd49f..214bb928fe9e 100644
--- a/include/trace/irq_event_types.h
+++ b/include/trace/irq_event_types.h
@@ -31,13 +31,13 @@ TRACE_EVENT(irq_handler_exit, | |||
31 | __field( int, ret ) | 31 | __field( int, ret ) |
32 | ), | 32 | ), |
33 | 33 | ||
34 | TP_printk("irq=%d return=%s", | ||
35 | __entry->irq, __entry->ret ? "handled" : "unhandled"), | ||
36 | |||
37 | TP_fast_assign( | 34 | TP_fast_assign( |
38 | __entry->irq = irq; | 35 | __entry->irq = irq; |
39 | __entry->ret = ret; | 36 | __entry->ret = ret; |
40 | ) | 37 | ), |
38 | |||
39 | TP_printk("irq=%d return=%s", | ||
40 | __entry->irq, __entry->ret ? "handled" : "unhandled") | ||
41 | ); | 41 | ); |
42 | 42 | ||
43 | #undef TRACE_SYSTEM | 43 | #undef TRACE_SYSTEM |
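
This hunk, and every sched_event_types.h hunk below, is purely mechanical: TP_fast_assign() now comes before TP_printk(), matching the reordered macro signature TRACE_EVENT(name, proto, args, struct, assign, print) introduced in tracepoint.h above. The resulting canonical shape of an event definition looks roughly like this (sketch; the event name is made up, the fields mirror irq_handler_exit):

    TRACE_EVENT(subsys_example_event,

            TP_PROTO(int irq, int ret),

            TP_ARGS(irq, ret),

            /* binary record layout */
            TP_STRUCT__entry(
                    __field( int, irq )
                    __field( int, ret )
            ),

            /* fill the record -- now comes before TP_printk() */
            TP_fast_assign(
                    __entry->irq = irq;
                    __entry->ret = ret;
            ),

            /* human-readable format -- now the last argument */
            TP_printk("irq=%d return=%s",
                      __entry->irq, __entry->ret ? "handled" : "unhandled")
    );
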
diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h
index fb37af672c88..63547dc1125f 100644
--- a/include/trace/sched_event_types.h
+++ b/include/trace/sched_event_types.h
@@ -22,12 +22,12 @@ TRACE_EVENT(sched_kthread_stop, | |||
22 | __field( pid_t, pid ) | 22 | __field( pid_t, pid ) |
23 | ), | 23 | ), |
24 | 24 | ||
25 | TP_printk("task %s:%d", __entry->comm, __entry->pid), | ||
26 | |||
27 | TP_fast_assign( | 25 | TP_fast_assign( |
28 | memcpy(__entry->comm, t->comm, TASK_COMM_LEN); | 26 | memcpy(__entry->comm, t->comm, TASK_COMM_LEN); |
29 | __entry->pid = t->pid; | 27 | __entry->pid = t->pid; |
30 | ) | 28 | ), |
29 | |||
30 | TP_printk("task %s:%d", __entry->comm, __entry->pid) | ||
31 | ); | 31 | ); |
32 | 32 | ||
33 | /* | 33 | /* |
@@ -43,11 +43,11 @@ TRACE_EVENT(sched_kthread_stop_ret, | |||
43 | __field( int, ret ) | 43 | __field( int, ret ) |
44 | ), | 44 | ), |
45 | 45 | ||
46 | TP_printk("ret %d", __entry->ret), | ||
47 | |||
48 | TP_fast_assign( | 46 | TP_fast_assign( |
49 | __entry->ret = ret; | 47 | __entry->ret = ret; |
50 | ) | 48 | ), |
49 | |||
50 | TP_printk("ret %d", __entry->ret) | ||
51 | ); | 51 | ); |
52 | 52 | ||
53 | /* | 53 | /* |
@@ -68,14 +68,14 @@ TRACE_EVENT(sched_wait_task, | |||
68 | __field( int, prio ) | 68 | __field( int, prio ) |
69 | ), | 69 | ), |
70 | 70 | ||
71 | TP_printk("task %s:%d [%d]", | ||
72 | __entry->comm, __entry->pid, __entry->prio), | ||
73 | |||
74 | TP_fast_assign( | 71 | TP_fast_assign( |
75 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | 72 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); |
76 | __entry->pid = p->pid; | 73 | __entry->pid = p->pid; |
77 | __entry->prio = p->prio; | 74 | __entry->prio = p->prio; |
78 | ) | 75 | ), |
76 | |||
77 | TP_printk("task %s:%d [%d]", | ||
78 | __entry->comm, __entry->pid, __entry->prio) | ||
79 | ); | 79 | ); |
80 | 80 | ||
81 | /* | 81 | /* |
@@ -97,16 +97,16 @@ TRACE_EVENT(sched_wakeup, | |||
97 | __field( int, success ) | 97 | __field( int, success ) |
98 | ), | 98 | ), |
99 | 99 | ||
100 | TP_printk("task %s:%d [%d] success=%d", | ||
101 | __entry->comm, __entry->pid, __entry->prio, | ||
102 | __entry->success), | ||
103 | |||
104 | TP_fast_assign( | 100 | TP_fast_assign( |
105 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | 101 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); |
106 | __entry->pid = p->pid; | 102 | __entry->pid = p->pid; |
107 | __entry->prio = p->prio; | 103 | __entry->prio = p->prio; |
108 | __entry->success = success; | 104 | __entry->success = success; |
109 | ) | 105 | ), |
106 | |||
107 | TP_printk("task %s:%d [%d] success=%d", | ||
108 | __entry->comm, __entry->pid, __entry->prio, | ||
109 | __entry->success) | ||
110 | ); | 110 | ); |
111 | 111 | ||
112 | /* | 112 | /* |
@@ -128,16 +128,16 @@ TRACE_EVENT(sched_wakeup_new, | |||
128 | __field( int, success ) | 128 | __field( int, success ) |
129 | ), | 129 | ), |
130 | 130 | ||
131 | TP_printk("task %s:%d [%d] success=%d", | ||
132 | __entry->comm, __entry->pid, __entry->prio, | ||
133 | __entry->success), | ||
134 | |||
135 | TP_fast_assign( | 131 | TP_fast_assign( |
136 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | 132 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); |
137 | __entry->pid = p->pid; | 133 | __entry->pid = p->pid; |
138 | __entry->prio = p->prio; | 134 | __entry->prio = p->prio; |
139 | __entry->success = success; | 135 | __entry->success = success; |
140 | ) | 136 | ), |
137 | |||
138 | TP_printk("task %s:%d [%d] success=%d", | ||
139 | __entry->comm, __entry->pid, __entry->prio, | ||
140 | __entry->success) | ||
141 | ); | 141 | ); |
142 | 142 | ||
143 | /* | 143 | /* |
@@ -162,10 +162,6 @@ TRACE_EVENT(sched_switch, | |||
162 | __field( int, next_prio ) | 162 | __field( int, next_prio ) |
163 | ), | 163 | ), |
164 | 164 | ||
165 | TP_printk("task %s:%d [%d] ==> %s:%d [%d]", | ||
166 | __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, | ||
167 | __entry->next_comm, __entry->next_pid, __entry->next_prio), | ||
168 | |||
169 | TP_fast_assign( | 165 | TP_fast_assign( |
170 | memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); | 166 | memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); |
171 | __entry->prev_pid = prev->pid; | 167 | __entry->prev_pid = prev->pid; |
@@ -173,7 +169,11 @@ TRACE_EVENT(sched_switch, | |||
173 | memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); | 169 | memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); |
174 | __entry->next_pid = next->pid; | 170 | __entry->next_pid = next->pid; |
175 | __entry->next_prio = next->prio; | 171 | __entry->next_prio = next->prio; |
176 | ) | 172 | ), |
173 | |||
174 | TP_printk("task %s:%d [%d] ==> %s:%d [%d]", | ||
175 | __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, | ||
176 | __entry->next_comm, __entry->next_pid, __entry->next_prio) | ||
177 | ); | 177 | ); |
178 | 178 | ||
179 | /* | 179 | /* |
@@ -193,17 +193,17 @@ TRACE_EVENT(sched_migrate_task, | |||
193 | __field( int, dest_cpu ) | 193 | __field( int, dest_cpu ) |
194 | ), | 194 | ), |
195 | 195 | ||
196 | TP_printk("task %s:%d [%d] from: %d to: %d", | ||
197 | __entry->comm, __entry->pid, __entry->prio, | ||
198 | __entry->orig_cpu, __entry->dest_cpu), | ||
199 | |||
200 | TP_fast_assign( | 196 | TP_fast_assign( |
201 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | 197 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); |
202 | __entry->pid = p->pid; | 198 | __entry->pid = p->pid; |
203 | __entry->prio = p->prio; | 199 | __entry->prio = p->prio; |
204 | __entry->orig_cpu = orig_cpu; | 200 | __entry->orig_cpu = orig_cpu; |
205 | __entry->dest_cpu = dest_cpu; | 201 | __entry->dest_cpu = dest_cpu; |
206 | ) | 202 | ), |
203 | |||
204 | TP_printk("task %s:%d [%d] from: %d to: %d", | ||
205 | __entry->comm, __entry->pid, __entry->prio, | ||
206 | __entry->orig_cpu, __entry->dest_cpu) | ||
207 | ); | 207 | ); |
208 | 208 | ||
209 | /* | 209 | /* |
@@ -221,14 +221,14 @@ TRACE_EVENT(sched_process_free, | |||
221 | __field( int, prio ) | 221 | __field( int, prio ) |
222 | ), | 222 | ), |
223 | 223 | ||
224 | TP_printk("task %s:%d [%d]", | ||
225 | __entry->comm, __entry->pid, __entry->prio), | ||
226 | |||
227 | TP_fast_assign( | 224 | TP_fast_assign( |
228 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | 225 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); |
229 | __entry->pid = p->pid; | 226 | __entry->pid = p->pid; |
230 | __entry->prio = p->prio; | 227 | __entry->prio = p->prio; |
231 | ) | 228 | ), |
229 | |||
230 | TP_printk("task %s:%d [%d]", | ||
231 | __entry->comm, __entry->pid, __entry->prio) | ||
232 | ); | 232 | ); |
233 | 233 | ||
234 | /* | 234 | /* |
@@ -246,14 +246,14 @@ TRACE_EVENT(sched_process_exit, | |||
246 | __field( int, prio ) | 246 | __field( int, prio ) |
247 | ), | 247 | ), |
248 | 248 | ||
249 | TP_printk("task %s:%d [%d]", | ||
250 | __entry->comm, __entry->pid, __entry->prio), | ||
251 | |||
252 | TP_fast_assign( | 249 | TP_fast_assign( |
253 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | 250 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); |
254 | __entry->pid = p->pid; | 251 | __entry->pid = p->pid; |
255 | __entry->prio = p->prio; | 252 | __entry->prio = p->prio; |
256 | ) | 253 | ), |
254 | |||
255 | TP_printk("task %s:%d [%d]", | ||
256 | __entry->comm, __entry->pid, __entry->prio) | ||
257 | ); | 257 | ); |
258 | 258 | ||
259 | /* | 259 | /* |
@@ -271,14 +271,14 @@ TRACE_EVENT(sched_process_wait, | |||
271 | __field( int, prio ) | 271 | __field( int, prio ) |
272 | ), | 272 | ), |
273 | 273 | ||
274 | TP_printk("task %s:%d [%d]", | ||
275 | __entry->comm, __entry->pid, __entry->prio), | ||
276 | |||
277 | TP_fast_assign( | 274 | TP_fast_assign( |
278 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 275 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
279 | __entry->pid = pid_nr(pid); | 276 | __entry->pid = pid_nr(pid); |
280 | __entry->prio = current->prio; | 277 | __entry->prio = current->prio; |
281 | ) | 278 | ), |
279 | |||
280 | TP_printk("task %s:%d [%d]", | ||
281 | __entry->comm, __entry->pid, __entry->prio) | ||
282 | ); | 282 | ); |
283 | 283 | ||
284 | /* | 284 | /* |
@@ -297,16 +297,16 @@ TRACE_EVENT(sched_process_fork, | |||
297 | __field( pid_t, child_pid ) | 297 | __field( pid_t, child_pid ) |
298 | ), | 298 | ), |
299 | 299 | ||
300 | TP_printk("parent %s:%d child %s:%d", | ||
301 | __entry->parent_comm, __entry->parent_pid, | ||
302 | __entry->child_comm, __entry->child_pid), | ||
303 | |||
304 | TP_fast_assign( | 300 | TP_fast_assign( |
305 | memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN); | 301 | memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN); |
306 | __entry->parent_pid = parent->pid; | 302 | __entry->parent_pid = parent->pid; |
307 | memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN); | 303 | memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN); |
308 | __entry->child_pid = child->pid; | 304 | __entry->child_pid = child->pid; |
309 | ) | 305 | ), |
306 | |||
307 | TP_printk("parent %s:%d child %s:%d", | ||
308 | __entry->parent_comm, __entry->parent_pid, | ||
309 | __entry->child_comm, __entry->child_pid) | ||
310 | ); | 310 | ); |
311 | 311 | ||
312 | /* | 312 | /* |
@@ -324,14 +324,14 @@ TRACE_EVENT(sched_signal_send, | |||
324 | __field( pid_t, pid ) | 324 | __field( pid_t, pid ) |
325 | ), | 325 | ), |
326 | 326 | ||
327 | TP_printk("sig: %d task %s:%d", | ||
328 | __entry->sig, __entry->comm, __entry->pid), | ||
329 | |||
330 | TP_fast_assign( | 327 | TP_fast_assign( |
331 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | 328 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); |
332 | __entry->pid = p->pid; | 329 | __entry->pid = p->pid; |
333 | __entry->sig = sig; | 330 | __entry->sig = sig; |
334 | ) | 331 | ), |
332 | |||
333 | TP_printk("sig: %d task %s:%d", | ||
334 | __entry->sig, __entry->comm, __entry->pid) | ||
335 | ); | 335 | ); |
336 | 336 | ||
337 | #undef TRACE_SYSTEM | 337 | #undef TRACE_SYSTEM |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index d24a10b8411a..1f32e4edf490 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -33,7 +33,7 @@ static struct trace_array *blk_tr; | |||
33 | static int __read_mostly blk_tracer_enabled; | 33 | static int __read_mostly blk_tracer_enabled; |
34 | 34 | ||
35 | /* Select an alternative, minimalistic output than the original one */ | 35 | /* Select an alternative, minimalistic output than the original one */ |
36 | #define TRACE_BLK_OPT_CLASSIC 0x1 | 36 | #define TRACE_BLK_OPT_CLASSIC 0x1 |
37 | 37 | ||
38 | static struct tracer_opt blk_tracer_opts[] = { | 38 | static struct tracer_opt blk_tracer_opts[] = { |
39 | /* Default disable the minimalistic output */ | 39 | /* Default disable the minimalistic output */ |
@@ -564,7 +564,7 @@ EXPORT_SYMBOL_GPL(blk_trace_startstop); | |||
564 | /** | 564 | /** |
565 | * blk_trace_ioctl: - handle the ioctls associated with tracing | 565 | * blk_trace_ioctl: - handle the ioctls associated with tracing |
566 | * @bdev: the block device | 566 | * @bdev: the block device |
567 | * @cmd: the ioctl cmd | 567 | * @cmd: the ioctl cmd |
568 | * @arg: the argument data, if any | 568 | * @arg: the argument data, if any |
569 | * | 569 | * |
570 | **/ | 570 | **/ |
@@ -1128,9 +1128,9 @@ static void blk_tracer_reset(struct trace_array *tr) | |||
1128 | 1128 | ||
1129 | static struct { | 1129 | static struct { |
1130 | const char *act[2]; | 1130 | const char *act[2]; |
1131 | int (*print)(struct trace_seq *s, const struct trace_entry *ent); | 1131 | int (*print)(struct trace_seq *s, const struct trace_entry *ent); |
1132 | } what2act[] __read_mostly = { | 1132 | } what2act[] __read_mostly = { |
1133 | [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, | 1133 | [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, |
1134 | [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, | 1134 | [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, |
1135 | [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, | 1135 | [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, |
1136 | [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, | 1136 | [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, |
@@ -1229,7 +1229,7 @@ static struct tracer blk_tracer __read_mostly = { | |||
1229 | }; | 1229 | }; |
1230 | 1230 | ||
1231 | static struct trace_event trace_blk_event = { | 1231 | static struct trace_event trace_blk_event = { |
1232 | .type = TRACE_BLK, | 1232 | .type = TRACE_BLK, |
1233 | .trace = blk_trace_event_print, | 1233 | .trace = blk_trace_event_print, |
1234 | .binary = blk_trace_event_print_binary, | 1234 | .binary = blk_trace_event_print_binary, |
1235 | }; | 1235 | }; |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e5b56199e5e0..a941d257b619 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -799,7 +799,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
799 | 799 | ||
800 | entry->preempt_count = pc & 0xff; | 800 | entry->preempt_count = pc & 0xff; |
801 | entry->pid = (tsk) ? tsk->pid : 0; | 801 | entry->pid = (tsk) ? tsk->pid : 0; |
802 | entry->tgid = (tsk) ? tsk->tgid : 0; | 802 | entry->tgid = (tsk) ? tsk->tgid : 0; |
803 | entry->flags = | 803 | entry->flags = |
804 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 804 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
805 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 805 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index aaa0755268b9..ad8c22efff41 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -157,7 +157,7 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, | |||
157 | 157 | ||
158 | 158 | ||
159 | static struct trace_event trace_branch_event = { | 159 | static struct trace_event trace_branch_event = { |
160 | .type = TRACE_BRANCH, | 160 | .type = TRACE_BRANCH, |
161 | .trace = trace_branch_print, | 161 | .trace = trace_branch_print, |
162 | }; | 162 | }; |
163 | 163 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 1880a6438097..769dfd00fc85 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -102,7 +102,7 @@ static int ftrace_set_clr_event(char *buf, int set) | |||
102 | mutex_lock(&event_mutex); | 102 | mutex_lock(&event_mutex); |
103 | events_for_each(call) { | 103 | events_for_each(call) { |
104 | 104 | ||
105 | if (!call->name) | 105 | if (!call->name || !call->regfunc) |
106 | continue; | 106 | continue; |
107 | 107 | ||
108 | if (match && | 108 | if (match && |
@@ -207,8 +207,20 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
207 | 207 | ||
208 | (*pos)++; | 208 | (*pos)++; |
209 | 209 | ||
210 | if ((unsigned long)call >= (unsigned long)__stop_ftrace_events) | 210 | for (;;) { |
211 | return NULL; | 211 | if ((unsigned long)call >= (unsigned long)__stop_ftrace_events) |
212 | return NULL; | ||
213 | |||
214 | /* | ||
215 | * The ftrace subsystem is for showing formats only. | ||
216 | * They can not be enabled or disabled via the event files. | ||
217 | */ | ||
218 | if (call->regfunc) | ||
219 | break; | ||
220 | |||
221 | call++; | ||
222 | next = call; | ||
223 | } | ||
212 | 224 | ||
213 | m->private = ++next; | 225 | m->private = ++next; |
214 | 226 | ||
@@ -338,8 +350,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
338 | 350 | ||
339 | #undef FIELD | 351 | #undef FIELD |
340 | #define FIELD(type, name) \ | 352 | #define FIELD(type, name) \ |
341 | #type, #name, (unsigned int)offsetof(typeof(field), name), \ | 353 | #type, #name, offsetof(typeof(field), name), sizeof(field.name) |
342 | (unsigned int)sizeof(field.name) | ||
343 | 354 | ||
344 | static int trace_write_header(struct trace_seq *s) | 355 | static int trace_write_header(struct trace_seq *s) |
345 | { | 356 | { |
@@ -347,11 +358,11 @@ static int trace_write_header(struct trace_seq *s) | |||
347 | 358 | ||
348 | /* struct trace_entry */ | 359 | /* struct trace_entry */ |
349 | return trace_seq_printf(s, | 360 | return trace_seq_printf(s, |
350 | "\tfield:%s %s;\toffset:%u;\tsize:%u;\n" | 361 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" |
351 | "\tfield:%s %s;\toffset:%u;\tsize:%u;\n" | 362 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" |
352 | "\tfield:%s %s;\toffset:%u;\tsize:%u;\n" | 363 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" |
353 | "\tfield:%s %s;\toffset:%u;\tsize:%u;\n" | 364 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" |
354 | "\tfield:%s %s;\toffset:%u;\tsize:%u;\n" | 365 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" |
355 | "\n", | 366 | "\n", |
356 | FIELD(unsigned char, type), | 367 | FIELD(unsigned char, type), |
357 | FIELD(unsigned char, flags), | 368 | FIELD(unsigned char, flags), |
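
The FIELD() and trace_write_header() changes above go together: offsetof() and sizeof() evaluate to size_t, so the (unsigned int) casts are dropped and the header lines are printed with %zu instead of %u. A self-contained userspace illustration of the same idiom (the struct here is a cut-down stand-in, not the kernel's trace_entry):

    #include <stddef.h>
    #include <stdio.h>

    struct fake_entry {
            unsigned char type;
            unsigned char flags;
    };

    int main(void)
    {
            struct fake_entry field;

            /* %zu is the printf conversion matching size_t */
            printf("\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
                   "unsigned char", "flags",
                   offsetof(struct fake_entry, flags), sizeof(field.flags));
            return 0;
    }
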
@@ -417,6 +428,13 @@ static const struct seq_operations show_set_event_seq_ops = { | |||
417 | .stop = t_stop, | 428 | .stop = t_stop, |
418 | }; | 429 | }; |
419 | 430 | ||
431 | static const struct file_operations ftrace_avail_fops = { | ||
432 | .open = ftrace_event_seq_open, | ||
433 | .read = seq_read, | ||
434 | .llseek = seq_lseek, | ||
435 | .release = seq_release, | ||
436 | }; | ||
437 | |||
420 | static const struct file_operations ftrace_set_event_fops = { | 438 | static const struct file_operations ftrace_set_event_fops = { |
421 | .open = ftrace_event_seq_open, | 439 | .open = ftrace_event_seq_open, |
422 | .read = seq_read, | 440 | .read = seq_read, |
@@ -558,6 +576,13 @@ static __init int event_trace_init(void) | |||
558 | if (!d_tracer) | 576 | if (!d_tracer) |
559 | return 0; | 577 | return 0; |
560 | 578 | ||
579 | entry = debugfs_create_file("available_events", 0444, d_tracer, | ||
580 | (void *)&show_event_seq_ops, | ||
581 | &ftrace_avail_fops); | ||
582 | if (!entry) | ||
583 | pr_warning("Could not create debugfs " | ||
584 | "'available_events' entry\n"); | ||
585 | |||
561 | entry = debugfs_create_file("set_event", 0644, d_tracer, | 586 | entry = debugfs_create_file("set_event", 0644, d_tracer, |
562 | (void *)&show_set_event_seq_ops, | 587 | (void *)&show_set_event_seq_ops, |
563 | &ftrace_set_event_fops); | 588 | &ftrace_set_event_fops); |
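
With this hunk the tracing directory gains a read-only available_events file next to set_event; it lists every event call that has a regfunc (the ftrace-internal, format-only entries are skipped by the t_next() loop above), and any listed name can then be written to set_event to enable it. A rough userspace sketch, assuming debugfs is mounted on /debug as in this tree's comments (later kernels use /sys/kernel/debug):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            /* path assumed from the /debug/tracing/ references above */
            FILE *avail = fopen("/debug/tracing/available_events", "r");

            if (!avail)
                    return 1;

            /* print the sched_* events that could be echoed into set_event */
            while (fgets(line, sizeof(line), avail))
                    if (!strncmp(line, "sched_", 6))
                            fputs(line, stdout);

            fclose(avail);
            return 0;
    }
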
diff --git a/kernel/trace/trace_events_stage_1.h b/kernel/trace/trace_events_stage_1.h
index 15e9bf965a18..38985f9b379c 100644
--- a/kernel/trace/trace_events_stage_1.h
+++ b/kernel/trace/trace_events_stage_1.h
@@ -6,11 +6,13 @@ | |||
6 | * struct ftrace_raw_<call> { | 6 | * struct ftrace_raw_<call> { |
7 | * struct trace_entry ent; | 7 | * struct trace_entry ent; |
8 | * <type> <item>; | 8 | * <type> <item>; |
9 | * <type2> <item2>[<len>]; | ||
9 | * [...] | 10 | * [...] |
10 | * }; | 11 | * }; |
11 | * | 12 | * |
12 | * The <type> <item> is created by the TRACE_FIELD(type, item, assign) | 13 | * The <type> <item> is created by the __field(type, item) macro or |
13 | * macro. We simply do "type item;", and that will create the fields | 14 | * the __array(type2, item2, len) macro. |
15 | * We simply do "type item;", and that will create the fields | ||
14 | * in the structure. | 16 | * in the structure. |
15 | */ | 17 | */ |
16 | 18 | ||
@@ -27,7 +29,7 @@ | |||
27 | #define TP_STRUCT__entry(args...) args | 29 | #define TP_STRUCT__entry(args...) args |
28 | 30 | ||
29 | #undef TRACE_EVENT | 31 | #undef TRACE_EVENT |
30 | #define TRACE_EVENT(name, proto, args, tstruct, print, assign) \ | 32 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ |
31 | struct ftrace_raw_##name { \ | 33 | struct ftrace_raw_##name { \ |
32 | struct trace_entry ent; \ | 34 | struct trace_entry ent; \ |
33 | tstruct \ | 35 | tstruct \ |
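
For the sched_switch example documented in tracepoint.h above, the stage-1 expansion would therefore produce roughly the following structure; this is a sketch derived from that TP_STRUCT__entry, not copied from generated code:

    struct ftrace_raw_sched_switch {
            struct trace_entry ent;
            char               prev_comm[TASK_COMM_LEN];
            pid_t              prev_pid;
            int                prev_prio;
            char               next_comm[TASK_COMM_LEN];
            pid_t              next_pid;
            int                next_prio;
    };
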
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
index d91bf4c56661..ca347afd6aa0 100644
--- a/kernel/trace/trace_events_stage_2.h
+++ b/kernel/trace/trace_events_stage_2.h
@@ -20,7 +20,7 @@ | |||
20 | * | 20 | * |
21 | * field = (typeof(field))entry; | 21 | * field = (typeof(field))entry; |
22 | * | 22 | * |
23 | * ret = trace_seq_printf(s, <TP_RAW_FMT> "%s", <ARGS> "\n"); | 23 | * ret = trace_seq_printf(s, <TP_printk> "\n"); |
24 | * if (!ret) | 24 | * if (!ret) |
25 | * return TRACE_TYPE_PARTIAL_LINE; | 25 | * return TRACE_TYPE_PARTIAL_LINE; |
26 | * | 26 | * |
@@ -39,7 +39,7 @@ | |||
39 | #define TP_printk(fmt, args...) fmt "\n", args | 39 | #define TP_printk(fmt, args...) fmt "\n", args |
40 | 40 | ||
41 | #undef TRACE_EVENT | 41 | #undef TRACE_EVENT |
42 | #define TRACE_EVENT(call, proto, args, tstruct, print, assign) \ | 42 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ |
43 | enum print_line_t \ | 43 | enum print_line_t \ |
44 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | 44 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ |
45 | { \ | 45 | { \ |
@@ -76,10 +76,9 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
76 | * int ret; | 76 | * int ret; |
77 | * | 77 | * |
78 | * ret = trace_seq_printf(s, #type " " #item ";" | 78 | * ret = trace_seq_printf(s, #type " " #item ";" |
79 | * " size:%d; offset:%d;\n", | 79 | * " offset:%u; size:%u;\n", |
80 | * sizeof(field.type), | 80 | * offsetof(struct ftrace_raw_##call, item), |
81 | * offsetof(struct ftrace_raw_##call, | 81 | * sizeof(field.type)); |
82 | * item)); | ||
83 | * | 82 | * |
84 | * } | 83 | * } |
85 | */ | 84 | */ |
@@ -115,7 +114,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
115 | #define TP_fast_assign(args...) args | 114 | #define TP_fast_assign(args...) args |
116 | 115 | ||
117 | #undef TRACE_EVENT | 116 | #undef TRACE_EVENT |
118 | #define TRACE_EVENT(call, proto, args, tstruct, print, func) \ | 117 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ |
119 | static int \ | 118 | static int \ |
120 | ftrace_format_##call(struct trace_seq *s) \ | 119 | ftrace_format_##call(struct trace_seq *s) \ |
121 | { \ | 120 | { \ |
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
index 3ba55d4ab073..ae2e323df0c7 100644
--- a/kernel/trace/trace_events_stage_3.h
+++ b/kernel/trace/trace_events_stage_3.h
@@ -5,23 +5,23 @@ | |||
5 | * | 5 | * |
6 | * static void ftrace_event_<call>(proto) | 6 | * static void ftrace_event_<call>(proto) |
7 | * { | 7 | * { |
8 | * event_trace_printk(_RET_IP_, "<call>: " <fmt>); | 8 | * event_trace_printk(_RET_IP_, "<call>: " <fmt>); |
9 | * } | 9 | * } |
10 | * | 10 | * |
11 | * static int ftrace_reg_event_<call>(void) | 11 | * static int ftrace_reg_event_<call>(void) |
12 | * { | 12 | * { |
13 | * int ret; | 13 | * int ret; |
14 | * | 14 | * |
15 | * ret = register_trace_<call>(ftrace_event_<call>); | 15 | * ret = register_trace_<call>(ftrace_event_<call>); |
16 | * if (!ret) | 16 | * if (!ret) |
17 | * pr_info("event trace: Could not activate trace point " | 17 | * pr_info("event trace: Could not activate trace point " |
18 | * "probe to <call>"); | 18 | * "probe to <call>"); |
19 | * return ret; | 19 | * return ret; |
20 | * } | 20 | * } |
21 | * | 21 | * |
22 | * static void ftrace_unreg_event_<call>(void) | 22 | * static void ftrace_unreg_event_<call>(void) |
23 | * { | 23 | * { |
24 | * unregister_trace_<call>(ftrace_event_<call>); | 24 | * unregister_trace_<call>(ftrace_event_<call>); |
25 | * } | 25 | * } |
26 | * | 26 | * |
27 | * For those macros defined with TRACE_FORMAT: | 27 | * For those macros defined with TRACE_FORMAT: |
@@ -29,9 +29,9 @@ | |||
29 | * static struct ftrace_event_call __used | 29 | * static struct ftrace_event_call __used |
30 | * __attribute__((__aligned__(4))) | 30 | * __attribute__((__aligned__(4))) |
31 | * __attribute__((section("_ftrace_events"))) event_<call> = { | 31 | * __attribute__((section("_ftrace_events"))) event_<call> = { |
32 | * .name = "<call>", | 32 | * .name = "<call>", |
33 | * .regfunc = ftrace_reg_event_<call>, | 33 | * .regfunc = ftrace_reg_event_<call>, |
34 | * .unregfunc = ftrace_unreg_event_<call>, | 34 | * .unregfunc = ftrace_unreg_event_<call>, |
35 | * } | 35 | * } |
36 | * | 36 | * |
37 | * | 37 | * |
@@ -41,66 +41,66 @@ | |||
41 | * | 41 | * |
42 | * static void ftrace_raw_event_<call>(proto) | 42 | * static void ftrace_raw_event_<call>(proto) |
43 | * { | 43 | * { |
44 | * struct ring_buffer_event *event; | 44 | * struct ring_buffer_event *event; |
45 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | 45 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 |
46 | * unsigned long irq_flags; | 46 | * unsigned long irq_flags; |
47 | * int pc; | 47 | * int pc; |
48 | * | 48 | * |
49 | * local_save_flags(irq_flags); | 49 | * local_save_flags(irq_flags); |
50 | * pc = preempt_count(); | 50 | * pc = preempt_count(); |
51 | * | 51 | * |
52 | * event = trace_current_buffer_lock_reserve(event_<call>.id, | 52 | * event = trace_current_buffer_lock_reserve(event_<call>.id, |
53 | * sizeof(struct ftrace_raw_<call>), | 53 | * sizeof(struct ftrace_raw_<call>), |
54 | * irq_flags, pc); | 54 | * irq_flags, pc); |
55 | * if (!event) | 55 | * if (!event) |
56 | * return; | 56 | * return; |
57 | * entry = ring_buffer_event_data(event); | 57 | * entry = ring_buffer_event_data(event); |
58 | * | 58 | * |
59 | * <tstruct>; <-- Here we assign the entries by the TRACE_FIELD. | 59 | * <assign>; <-- Here we assign the entries by the __field and |
60 | * __array macros. | ||
60 | * | 61 | * |
61 | * trace_current_buffer_unlock_commit(event, irq_flags, pc); | 62 | * trace_current_buffer_unlock_commit(event, irq_flags, pc); |
62 | * } | 63 | * } |
63 | * | 64 | * |
64 | * static int ftrace_raw_reg_event_<call>(void) | 65 | * static int ftrace_raw_reg_event_<call>(void) |
65 | * { | 66 | * { |
66 | * int ret; | 67 | * int ret; |
67 | * | 68 | * |
68 | * ret = register_trace_<call>(ftrace_raw_event_<call>); | 69 | * ret = register_trace_<call>(ftrace_raw_event_<call>); |
69 | * if (!ret) | 70 | * if (!ret) |
70 | * pr_info("event trace: Could not activate trace point " | 71 | * pr_info("event trace: Could not activate trace point " |
71 | * "probe to <call>"); | 72 | * "probe to <call>"); |
72 | * return ret; | 73 | * return ret; |
73 | * } | 74 | * } |
74 | * | 75 | * |
75 | * static void ftrace_unreg_event_<call>(void) | 76 | * static void ftrace_unreg_event_<call>(void) |
76 | * { | 77 | * { |
77 | * unregister_trace_<call>(ftrace_raw_event_<call>); | 78 | * unregister_trace_<call>(ftrace_raw_event_<call>); |
78 | * } | 79 | * } |
79 | * | 80 | * |
80 | * static struct trace_event ftrace_event_type_<call> = { | 81 | * static struct trace_event ftrace_event_type_<call> = { |
81 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | 82 | * .trace = ftrace_raw_output_<call>, <-- stage 2 |
82 | * }; | 83 | * }; |
83 | * | 84 | * |
84 | * static int ftrace_raw_init_event_<call>(void) | 85 | * static int ftrace_raw_init_event_<call>(void) |
85 | * { | 86 | * { |
86 | * int id; | 87 | * int id; |
87 | * | 88 | * |
88 | * id = register_ftrace_event(&ftrace_event_type_<call>); | 89 | * id = register_ftrace_event(&ftrace_event_type_<call>); |
89 | * if (!id) | 90 | * if (!id) |
90 | * return -ENODEV; | 91 | * return -ENODEV; |
91 | * event_<call>.id = id; | 92 | * event_<call>.id = id; |
92 | * return 0; | 93 | * return 0; |
93 | * } | 94 | * } |
94 | * | 95 | * |
95 | * static struct ftrace_event_call __used | 96 | * static struct ftrace_event_call __used |
96 | * __attribute__((__aligned__(4))) | 97 | * __attribute__((__aligned__(4))) |
97 | * __attribute__((section("_ftrace_events"))) event_<call> = { | 98 | * __attribute__((section("_ftrace_events"))) event_<call> = { |
98 | * .name = "<call>", | 99 | * .name = "<call>", |
99 | * .regfunc = ftrace_reg_event_<call>, | 100 | * .system = "<system>", |
100 | * .unregfunc = ftrace_unreg_event_<call>, | 101 | * .raw_init = ftrace_raw_init_event_<call>, |
101 | * .raw_init = ftrace_raw_init_event_<call>, | 102 | * .regfunc = ftrace_reg_event_<call>, |
102 | * .raw_reg = ftrace_raw_reg_event_<call>, | 103 | * .unregfunc = ftrace_unreg_event_<call>, |
103 | * .raw_unreg = ftrace_raw_unreg_event_<call>, | ||
104 | * .show_format = ftrace_format_<call>, | 104 | * .show_format = ftrace_format_<call>, |
105 | * } | 105 | * } |
106 | * | 106 | * |
@@ -138,7 +138,7 @@ _TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \ | |||
138 | static struct ftrace_event_call __used \ | 138 | static struct ftrace_event_call __used \ |
139 | __attribute__((__aligned__(4))) \ | 139 | __attribute__((__aligned__(4))) \ |
140 | __attribute__((section("_ftrace_events"))) event_##call = { \ | 140 | __attribute__((section("_ftrace_events"))) event_##call = { \ |
141 | .name = #call, \ | 141 | .name = #call, \ |
142 | .system = __stringify(TRACE_SYSTEM), \ | 142 | .system = __stringify(TRACE_SYSTEM), \ |
143 | .regfunc = ftrace_reg_event_##call, \ | 143 | .regfunc = ftrace_reg_event_##call, \ |
144 | .unregfunc = ftrace_unreg_event_##call, \ | 144 | .unregfunc = ftrace_unreg_event_##call, \ |
@@ -148,7 +148,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
148 | #define __entry entry | 148 | #define __entry entry |
149 | 149 | ||
150 | #undef TRACE_EVENT | 150 | #undef TRACE_EVENT |
151 | #define TRACE_EVENT(call, proto, args, tstruct, print, assign) \ | 151 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ |
152 | \ | 152 | \ |
153 | static struct ftrace_event_call event_##call; \ | 153 | static struct ftrace_event_call event_##call; \ |
154 | \ | 154 | \ |
@@ -163,7 +163,7 @@ static void ftrace_raw_event_##call(proto) \ | |||
163 | pc = preempt_count(); \ | 163 | pc = preempt_count(); \ |
164 | \ | 164 | \ |
165 | event = trace_current_buffer_lock_reserve(event_##call.id, \ | 165 | event = trace_current_buffer_lock_reserve(event_##call.id, \ |
166 | sizeof(struct ftrace_raw_##call), \ | 166 | sizeof(struct ftrace_raw_##call), \ |
167 | irq_flags, pc); \ | 167 | irq_flags, pc); \ |
168 | if (!event) \ | 168 | if (!event) \ |
169 | return; \ | 169 | return; \ |
@@ -208,7 +208,7 @@ static int ftrace_raw_init_event_##call(void) \ | |||
208 | static struct ftrace_event_call __used \ | 208 | static struct ftrace_event_call __used \ |
209 | __attribute__((__aligned__(4))) \ | 209 | __attribute__((__aligned__(4))) \ |
210 | __attribute__((section("_ftrace_events"))) event_##call = { \ | 210 | __attribute__((section("_ftrace_events"))) event_##call = { \ |
211 | .name = #call, \ | 211 | .name = #call, \ |
212 | .system = __stringify(TRACE_SYSTEM), \ | 212 | .system = __stringify(TRACE_SYSTEM), \ |
213 | .raw_init = ftrace_raw_init_event_##call, \ | 213 | .raw_init = ftrace_raw_init_event_##call, \ |
214 | .regfunc = ftrace_raw_reg_event_##call, \ | 214 | .regfunc = ftrace_raw_reg_event_##call, \ |
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 23ae78430d58..4d9952d3df50 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -94,7 +94,7 @@ ftrace_format_##call(struct trace_seq *s) \ | |||
94 | static struct ftrace_event_call __used \ | 94 | static struct ftrace_event_call __used \ |
95 | __attribute__((__aligned__(4))) \ | 95 | __attribute__((__aligned__(4))) \ |
96 | __attribute__((section("_ftrace_events"))) event_##call = { \ | 96 | __attribute__((section("_ftrace_events"))) event_##call = { \ |
97 | .name = #call, \ | 97 | .name = #call, \ |
98 | .id = proto, \ | 98 | .id = proto, \ |
99 | .system = __stringify(TRACE_SYSTEM), \ | 99 | .system = __stringify(TRACE_SYSTEM), \ |
100 | .show_format = ftrace_format_##call, \ | 100 | .show_format = ftrace_format_##call, \ |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 35257be6a9d6..8566c14b3e9a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -841,12 +841,12 @@ static void graph_trace_close(struct trace_iterator *iter) | |||
841 | } | 841 | } |
842 | 842 | ||
843 | static struct tracer graph_trace __read_mostly = { | 843 | static struct tracer graph_trace __read_mostly = { |
844 | .name = "function_graph", | 844 | .name = "function_graph", |
845 | .open = graph_trace_open, | 845 | .open = graph_trace_open, |
846 | .close = graph_trace_close, | 846 | .close = graph_trace_close, |
847 | .wait_pipe = poll_wait_pipe, | 847 | .wait_pipe = poll_wait_pipe, |
848 | .init = graph_trace_init, | 848 | .init = graph_trace_init, |
849 | .reset = graph_trace_reset, | 849 | .reset = graph_trace_reset, |
850 | .print_line = print_graph_function, | 850 | .print_line = print_graph_function, |
851 | .print_header = print_graph_headers, | 851 | .print_header = print_graph_headers, |
852 | .flags = &tracer_flags, | 852 | .flags = &tracer_flags, |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ef8fd661b217..491832af9ba1 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -565,7 +565,7 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags) | |||
565 | } | 565 | } |
566 | 566 | ||
567 | static struct trace_event trace_fn_event = { | 567 | static struct trace_event trace_fn_event = { |
568 | .type = TRACE_FN, | 568 | .type = TRACE_FN, |
569 | .trace = trace_fn_trace, | 569 | .trace = trace_fn_trace, |
570 | .raw = trace_fn_raw, | 570 | .raw = trace_fn_raw, |
571 | .hex = trace_fn_hex, | 571 | .hex = trace_fn_hex, |
@@ -696,7 +696,7 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter, | |||
696 | } | 696 | } |
697 | 697 | ||
698 | static struct trace_event trace_ctx_event = { | 698 | static struct trace_event trace_ctx_event = { |
699 | .type = TRACE_CTX, | 699 | .type = TRACE_CTX, |
700 | .trace = trace_ctx_print, | 700 | .trace = trace_ctx_print, |
701 | .raw = trace_ctx_raw, | 701 | .raw = trace_ctx_raw, |
702 | .hex = trace_ctx_hex, | 702 | .hex = trace_ctx_hex, |
@@ -704,7 +704,7 @@ static struct trace_event trace_ctx_event = { | |||
704 | }; | 704 | }; |
705 | 705 | ||
706 | static struct trace_event trace_wake_event = { | 706 | static struct trace_event trace_wake_event = { |
707 | .type = TRACE_WAKE, | 707 | .type = TRACE_WAKE, |
708 | .trace = trace_wake_print, | 708 | .trace = trace_wake_print, |
709 | .raw = trace_wake_raw, | 709 | .raw = trace_wake_raw, |
710 | .hex = trace_wake_hex, | 710 | .hex = trace_wake_hex, |
@@ -759,7 +759,7 @@ static enum print_line_t trace_special_bin(struct trace_iterator *iter, | |||
759 | } | 759 | } |
760 | 760 | ||
761 | static struct trace_event trace_special_event = { | 761 | static struct trace_event trace_special_event = { |
762 | .type = TRACE_SPECIAL, | 762 | .type = TRACE_SPECIAL, |
763 | .trace = trace_special_print, | 763 | .trace = trace_special_print, |
764 | .raw = trace_special_print, | 764 | .raw = trace_special_print, |
765 | .hex = trace_special_hex, | 765 | .hex = trace_special_hex, |
@@ -796,7 +796,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, | |||
796 | } | 796 | } |
797 | 797 | ||
798 | static struct trace_event trace_stack_event = { | 798 | static struct trace_event trace_stack_event = { |
799 | .type = TRACE_STACK, | 799 | .type = TRACE_STACK, |
800 | .trace = trace_stack_print, | 800 | .trace = trace_stack_print, |
801 | .raw = trace_special_print, | 801 | .raw = trace_special_print, |
802 | .hex = trace_special_hex, | 802 | .hex = trace_special_hex, |
@@ -825,7 +825,7 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, | |||
825 | } | 825 | } |
826 | 826 | ||
827 | static struct trace_event trace_user_stack_event = { | 827 | static struct trace_event trace_user_stack_event = { |
828 | .type = TRACE_USER_STACK, | 828 | .type = TRACE_USER_STACK, |
829 | .trace = trace_user_stack_print, | 829 | .trace = trace_user_stack_print, |
830 | .raw = trace_special_print, | 830 | .raw = trace_special_print, |
831 | .hex = trace_special_hex, | 831 | .hex = trace_special_hex, |
@@ -879,7 +879,7 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags) | |||
879 | 879 | ||
880 | 880 | ||
881 | static struct trace_event trace_print_event = { | 881 | static struct trace_event trace_print_event = { |
882 | .type = TRACE_PRINT, | 882 | .type = TRACE_PRINT, |
883 | .trace = trace_print_print, | 883 | .trace = trace_print_print, |
884 | .raw = trace_print_raw, | 884 | .raw = trace_print_raw, |
885 | }; | 885 | }; |
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 739fdacf873b..fb5ccac8bbc0 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -19,14 +19,14 @@ struct cpu_workqueue_stats { | |||
19 | /* Useful to know if we print the cpu headers */ | 19 | /* Useful to know if we print the cpu headers */ |
20 | bool first_entry; | 20 | bool first_entry; |
21 | int cpu; | 21 | int cpu; |
22 | pid_t pid; | 22 | pid_t pid; |
23 | /* Can be inserted from interrupt or user context, need to be atomic */ | 23 | /* Can be inserted from interrupt or user context, need to be atomic */ |
24 | atomic_t inserted; | 24 | atomic_t inserted; |
25 | /* | 25 | /* |
26 | * Don't need to be atomic, works are serialized in a single workqueue thread | 26 | * Don't need to be atomic, works are serialized in a single workqueue thread |
27 | * on a single CPU. | 27 | * on a single CPU. |
28 | */ | 28 | */ |
29 | unsigned int executed; | 29 | unsigned int executed; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | /* List of workqueue threads on one cpu */ | 32 | /* List of workqueue threads on one cpu */ |