author		Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2014-10-30 20:44:53 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2014-11-11 12:43:15 -0500
commit		243f7610a68a606eb1787c09450a440bf30bebe0 (patch)
tree		da33141093d8ba036a45f32b3dfbe5ce8a21dbd7
parent		458faf0b88b19a46d51bb9760fa6e03a1bc6d97b (diff)
tracing: Move tracing_sched_{switch,wakeup}() into wakeup tracer
The only code that references tracing_sched_switch_trace() and
tracing_sched_wakeup_trace() is the wakeup latency tracer. Those two
functions used to belong to the sched_switch tracer, which has long been
removed. They were left behind only because the wakeup latency tracer
used them. Since the wakeup latency tracer is their sole user, they
should be static functions inside that code.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
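The pattern here is the usual one for a helper with a single caller: drop the
declaration from the shared header and define the function static in the file
that uses it, so the compiler enforces that nothing else grows a dependency on
it. A minimal userspace sketch of that pattern (record_event and the file names
are hypothetical, for illustration only; this is not kernel code):

	/* shared.h (before): the helper was exported to every includer */
	void record_event(int value);

	/* only_user.c (after): the sole caller owns a static copy */
	#include <stdio.h>

	static void record_event(int value)
	{
		printf("event: %d\n", value);	/* stands in for the real work */
	}

	int main(void)
	{
		record_event(42);
		return 0;
	}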
-rw-r--r--	kernel/trace/trace.h              |  9 -
-rw-r--r--	kernel/trace/trace_sched_switch.c | 56 -
-rw-r--r--	kernel/trace/trace_sched_wakeup.c | 56 +
3 files changed, 56 insertions(+), 65 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a3a82d5f25dc..3376de623ea0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -569,15 +569,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
-void tracing_sched_switch_trace(struct trace_array *tr,
-				struct task_struct *prev,
-				struct task_struct *next,
-				unsigned long flags, int pc);
-
-void tracing_sched_wakeup_trace(struct trace_array *tr,
-				struct task_struct *wakee,
-				struct task_struct *cur,
-				unsigned long flags, int pc);
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index f7c7f4f1544c..2e293beb186e 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -17,34 +17,6 @@
 static int			sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
 
-void
-tracing_sched_switch_trace(struct trace_array *tr,
-			   struct task_struct *prev,
-			   struct task_struct *next,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_context_switch;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry	= ring_buffer_event_data(event);
-	entry->prev_pid		= prev->pid;
-	entry->prev_prio	= prev->prio;
-	entry->prev_state	= prev->state;
-	entry->next_pid		= next->pid;
-	entry->next_prio	= next->prio;
-	entry->next_state	= next->state;
-	entry->next_cpu		= task_cpu(next);
-
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
-
 static void
 probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
 {
@@ -55,34 +27,6 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
 	tracing_record_cmdline(next);
 }
 
-void
-tracing_sched_wakeup_trace(struct trace_array *tr,
-			   struct task_struct *wakee,
-			   struct task_struct *curr,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_wakeup;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry	= ring_buffer_event_data(event);
-	entry->prev_pid		= curr->pid;
-	entry->prev_prio	= curr->prio;
-	entry->prev_state	= curr->state;
-	entry->next_pid		= wakee->pid;
-	entry->next_prio	= wakee->prio;
-	entry->next_state	= wakee->state;
-	entry->next_cpu		= task_cpu(wakee);
-
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
-
 static void
 probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
 {
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 19bd8928ce94..8fb84b362816 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -365,6 +365,62 @@ probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
 	wakeup_current_cpu = cpu;
 }
 
+static void
+tracing_sched_switch_trace(struct trace_array *tr,
+			   struct task_struct *prev,
+			   struct task_struct *next,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_context_switch;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	entry->prev_pid		= prev->pid;
+	entry->prev_prio	= prev->prio;
+	entry->prev_state	= prev->state;
+	entry->next_pid		= next->pid;
+	entry->next_prio	= next->prio;
+	entry->next_state	= next->state;
+	entry->next_cpu		= task_cpu(next);
+
+	if (!call_filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, flags, pc);
+}
+
+static void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+			   struct task_struct *wakee,
+			   struct task_struct *curr,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_wakeup;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	entry->prev_pid		= curr->pid;
+	entry->prev_prio	= curr->prio;
+	entry->prev_state	= curr->state;
+	entry->next_pid		= wakee->pid;
+	entry->next_prio	= wakee->prio;
+	entry->next_state	= wakee->state;
+	entry->next_cpu		= task_cpu(wakee);
+
+	if (!call_filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, flags, pc);
+}
+
 static void notrace
 probe_wakeup_sched_switch(void *ignore,
 			  struct task_struct *prev, struct task_struct *next)