-rw-r--r--  kernel/trace/trace.h              |  9 ---------
-rw-r--r--  kernel/trace/trace_sched_switch.c | 56 --------------------------------
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 56 ++++++++++++++++++++++++++++++++
3 files changed, 56 insertions(+), 65 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a3a82d5f25dc..3376de623ea0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -569,15 +569,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
-void tracing_sched_switch_trace(struct trace_array *tr,
-				struct task_struct *prev,
-				struct task_struct *next,
-				unsigned long flags, int pc);
-
-void tracing_sched_wakeup_trace(struct trace_array *tr,
-				struct task_struct *wakee,
-				struct task_struct *cur,
-				unsigned long flags, int pc);
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index f7c7f4f1544c..2e293beb186e 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -17,34 +17,6 @@
 static int sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
 
-void
-tracing_sched_switch_trace(struct trace_array *tr,
-			   struct task_struct *prev,
-			   struct task_struct *next,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_context_switch;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->prev_pid = prev->pid;
-	entry->prev_prio = prev->prio;
-	entry->prev_state = prev->state;
-	entry->next_pid = next->pid;
-	entry->next_prio = next->prio;
-	entry->next_state = next->state;
-	entry->next_cpu = task_cpu(next);
-
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
-
 static void
 probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
 {
@@ -55,34 +27,6 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
 	tracing_record_cmdline(next);
 }
 
-void
-tracing_sched_wakeup_trace(struct trace_array *tr,
-			   struct task_struct *wakee,
-			   struct task_struct *curr,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_wakeup;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->prev_pid = curr->pid;
-	entry->prev_prio = curr->prio;
-	entry->prev_state = curr->state;
-	entry->next_pid = wakee->pid;
-	entry->next_prio = wakee->prio;
-	entry->next_state = wakee->state;
-	entry->next_cpu = task_cpu(wakee);
-
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
-
 static void
 probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
 {
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 19bd8928ce94..8fb84b362816 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -365,6 +365,62 @@ probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
 	wakeup_current_cpu = cpu;
 }
 
+static void
+tracing_sched_switch_trace(struct trace_array *tr,
+			   struct task_struct *prev,
+			   struct task_struct *next,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_context_switch;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->prev_pid = prev->pid;
+	entry->prev_prio = prev->prio;
+	entry->prev_state = prev->state;
+	entry->next_pid = next->pid;
+	entry->next_prio = next->prio;
+	entry->next_state = next->state;
+	entry->next_cpu = task_cpu(next);
+
+	if (!call_filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, flags, pc);
+}
+
+static void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+			   struct task_struct *wakee,
+			   struct task_struct *curr,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_wakeup;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->prev_pid = curr->pid;
+	entry->prev_prio = curr->prio;
+	entry->prev_state = curr->state;
+	entry->next_pid = wakee->pid;
+	entry->next_prio = wakee->prio;
+	entry->next_state = wakee->state;
+	entry->next_cpu = task_cpu(wakee);
+
+	if (!call_filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, flags, pc);
+}
+
 static void notrace
 probe_wakeup_sched_switch(void *ignore,
 			  struct task_struct *prev, struct task_struct *next)
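With the two helpers now static, their only callers live inside trace_sched_wakeup.c itself, which is why the cross-file prototypes in trace.h become dead and are deleted at the top of the patch. As a hedged sketch of how the wakeup tracer presumably reaches them (the probe bodies are outside this diff; wakeup_trace, the guard checks, and the flags/pc bookkeeping below are assumptions, not part of the patch):

static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	unsigned long flags;
	int pc = preempt_count();

	/* ... priority and "is tracing active" checks elided ... */

	local_irq_save(flags);
	/* Log a TRACE_WAKE event: the current task wakes up p. */
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
	local_irq_restore(flags);
}

probe_wakeup_sched_switch(), whose opening lines close the hunk above, would call tracing_sched_switch_trace() the same way to log the matching TRACE_CTX event once the woken task is finally scheduled in.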