Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
 kernel/trace/trace_sched_switch.c | 57 +++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index a98106dd979c..e1285d7b5488 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -20,6 +20,34 @@ static int sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
 static int sched_stopped;
 
+
+void
+tracing_sched_switch_trace(struct trace_array *tr,
+			   struct task_struct *prev,
+			   struct task_struct *next,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_context_switch;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->prev_pid		= prev->pid;
+	entry->prev_prio	= prev->prio;
+	entry->prev_state	= prev->state;
+	entry->next_pid		= next->pid;
+	entry->next_prio	= next->prio;
+	entry->next_state	= next->state;
+	entry->next_cpu		= task_cpu(next);
+
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, flags, pc);
+}
+
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 		   struct task_struct *next)
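
(For reference, not part of the patch: a minimal sketch of how a probe could
call the helper added above. `ctx_trace` stands in for the file's trace_array
pointer and is an assumption here; `sched_ref`, `sched_stopped` and the
local_irq_save()/local_irq_restore() bracketing are taken from the
surrounding context.)

static void
example_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			   struct task_struct *next)
{
	unsigned long flags;
	int pc;

	if (!sched_ref || sched_stopped)
		return;

	pc = preempt_count();		/* recorded in the entry as 'pc' */
	local_irq_save(flags);		/* 'flags' is recorded as well */
	/* ctx_trace: assumed trace_array used by the sched tracer */
	tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
	local_irq_restore(flags);
}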
@@ -49,6 +77,35 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	local_irq_restore(flags);
 }
 
+void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+			   struct task_struct *wakee,
+			   struct task_struct *curr,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_wakeup;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->prev_pid		= curr->pid;
+	entry->prev_prio	= curr->prio;
+	entry->prev_state	= curr->state;
+	entry->next_pid		= wakee->pid;
+	entry->next_prio	= wakee->prio;
+	entry->next_state	= wakee->state;
+	entry->next_cpu		= task_cpu(wakee);
+
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
+}
+
 static void
 probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
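
(Likewise a sketch, not part of the patch: the wakeup-side caller. Unlike the
sched_switch path, tracing_sched_wakeup_trace() commits with
ring_buffer_unlock_commit() and then emits the kernel and user stack traces
itself, so the caller only captures flags and the preempt count. `ctx_trace`
is again an assumed trace_array pointer; `current` is the task performing the
wakeup.)

static void
example_probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee,
			   int success)
{
	unsigned long flags;
	int pc;

	if (!sched_ref || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	/* record current (the waker) as "prev", wakee as "next" */
	tracing_sched_wakeup_trace(ctx_trace, wakee, current, flags, pc);
	local_irq_restore(flags);
}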