Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
 kernel/trace/trace_sched_switch.c | 59 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index a98106dd979c..5fca0f51fde4 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -20,6 +20,35 @@ static int sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
 static int sched_stopped;
 
+
+void
+tracing_sched_switch_trace(struct trace_array *tr,
+			   struct task_struct *prev,
+			   struct task_struct *next,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_context_switch;
+	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->prev_pid		= prev->pid;
+	entry->prev_prio	= prev->prio;
+	entry->prev_state	= prev->state;
+	entry->next_pid		= next->pid;
+	entry->next_prio	= next->prio;
+	entry->next_state	= next->state;
+	entry->next_cpu		= task_cpu(next);
+
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, flags, pc);
+}
+
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 		   struct task_struct *next)
@@ -49,6 +78,36 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	local_irq_restore(flags);
 }
 
+void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+			   struct task_struct *wakee,
+			   struct task_struct *curr,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_wakeup;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+	struct ring_buffer *buffer = tr->buffer;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->prev_pid		= curr->pid;
+	entry->prev_prio	= curr->prio;
+	entry->prev_state	= curr->state;
+	entry->next_pid		= wakee->pid;
+	entry->next_prio	= wakee->prio;
+	entry->next_state	= wakee->state;
+	entry->next_cpu		= task_cpu(wakee);
+
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
+	ftrace_trace_stack(tr->buffer, flags, 6, pc);
+	ftrace_trace_userstack(tr->buffer, flags, pc);
+}
+
 static void
 probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
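Note: the hunk context shows only the tail of probe_sched_switch() (the local_irq_restore() and closing brace), not its body. The following is a minimal sketch of the expected call pattern, assuming the ctx_trace, tracer_enabled, and tracing_record_cmdline() plumbing that this file uses elsewhere; it is an illustration, not the code from this commit.

/*
 * Hypothetical sketch -- not part of this hunk.  It shows how a probe
 * would feed tracing_sched_switch_trace(): record cmdlines, bail out
 * when tracing is off, disable interrupts, then emit the prev/next
 * pair for the current CPU.
 */
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
		   struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* skip the event if tracing is disabled on this CPU */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}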
