Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c                       2
-rw-r--r--   kernel/trace/ring_buffer.c          19
-rw-r--r--   kernel/trace/trace.c                10
-rw-r--r--   kernel/trace/trace.h                 1
-rw-r--r--   kernel/trace/trace_boot.c           12
-rw-r--r--   kernel/trace/trace_functions.c      14
-rw-r--r--   kernel/trace/trace_hw_branches.c    14
-rw-r--r--   kernel/trace/trace_mmiotrace.c       6
-rw-r--r--   kernel/trace/trace_sched_switch.c   16
-rw-r--r--   kernel/trace/trace_sched_wakeup.c    2
-rw-r--r--   kernel/trace/trace_sysprof.c        12
11 files changed, 40 insertions, 68 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index ceda5799466e..dcb39bc88f6c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2324,7 +2324,7 @@ out_activate:
         success = 1;
 
 out_running:
-        trace_sched_wakeup(rq, p);
+        trace_sched_wakeup(rq, p, success);
         check_preempt_curr(rq, p, sync);
 
         p->state = TASK_RUNNING;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bb6922a931b1..76f34c0ef29c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -838,6 +838,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
          * back to us). This allows us to do a simple loop to
          * assign the commit to the tail.
          */
+ again:
         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
                 cpu_buffer->commit_page->page->commit =
                         cpu_buffer->commit_page->write;
@@ -853,6 +854,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
                         cpu_buffer->commit_page->write;
                 barrier();
         }
+
+        /* again, keep gcc from optimizing */
+        barrier();
+
+        /*
+         * If an interrupt came in just after the first while loop
+         * and pushed the tail page forward, we will be left with
+         * a dangling commit that will never go forward.
+         */
+        if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+                goto again;
 }
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
@@ -950,12 +962,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                   unsigned type, unsigned long length, u64 *ts)
 {
-        struct buffer_page *tail_page, *head_page, *reader_page;
+        struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
         unsigned long tail, write;
         struct ring_buffer *buffer = cpu_buffer->buffer;
         struct ring_buffer_event *event;
         unsigned long flags;
 
+        commit_page = cpu_buffer->commit_page;
+        /* we just need to protect against interrupts */
+        barrier();
         tail_page = cpu_buffer->tail_page;
         write = local_add_return(length, &tail_page->write);
         tail = write - length;
@@ -981,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
          * it all the way around the buffer, bail, and warn
          * about it.
          */
-        if (unlikely(next_page == cpu_buffer->commit_page)) {
+        if (unlikely(next_page == commit_page)) {
                 WARN_ON_ONCE(1);
                 goto out_unlock;
         }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0eb6d48347f7..79db26e8216e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -679,6 +679,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
         ftrace_enable_cpu();
 }
 
+void tracing_reset_online_cpus(struct trace_array *tr)
+{
+        int cpu;
+
+        tr->time_start = ftrace_now(tr->cpu);
+
+        for_each_online_cpu(cpu)
+                tracing_reset(tr, cpu);
+}
+
 #define SAVED_CMDLINES 128
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fc75dce7a664..cc7a4f864036 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -374,6 +374,7 @@ struct trace_iterator {
 int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
+void tracing_reset_online_cpus(struct trace_array *tr);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a4fa2c57e34e..3ccebde28482 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -37,16 +37,6 @@ void disable_boot_trace(void)
         tracing_stop_sched_switch_record();
 }
 
-static void reset_boot_trace(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static int boot_trace_init(struct trace_array *tr)
 {
         int cpu;
@@ -130,7 +120,7 @@ struct tracer boot_tracer __read_mostly =
 {
         .name           = "initcall",
         .init           = boot_trace_init,
-        .reset          = reset_boot_trace,
+        .reset          = tracing_reset_online_cpus,
         .print_line     = initcall_print_line,
 };
 
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e74f6d0a3216..9236d7e25a16 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,20 +16,10 @@
 
 #include "trace.h"
 
-static void function_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static void start_function_trace(struct trace_array *tr)
 {
         tr->cpu = get_cpu();
-        function_reset(tr);
+        tracing_reset_online_cpus(tr);
         put_cpu();
 
         tracing_start_cmdline_record();
@@ -55,7 +45,7 @@ static void function_trace_reset(struct trace_array *tr)
 
 static void function_trace_start(struct trace_array *tr)
 {
-        function_reset(tr);
+        tracing_reset_online_cpus(tr);
 }
 
 static struct tracer function_trace __read_mostly =
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ee29e012aa97..b6a3e20a49a9 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -25,16 +25,6 @@ static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
 #define this_buffer per_cpu(buffer, smp_processor_id())
 
 
-static void bts_trace_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static void bts_trace_start_cpu(void *arg)
 {
         if (this_tracer)
@@ -54,7 +44,7 @@ static void bts_trace_start(struct trace_array *tr)
 {
         int cpu;
 
-        bts_trace_reset(tr);
+        tracing_reset_online_cpus(tr);
 
         for_each_cpu_mask(cpu, cpu_possible_map)
                 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
@@ -78,7 +68,7 @@ static void bts_trace_stop(struct trace_array *tr)
 
 static int bts_trace_init(struct trace_array *tr)
 {
-        bts_trace_reset(tr);
+        tracing_reset_online_cpus(tr);
         bts_trace_start(tr);
 
         return 0;
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 2fb6da6523b3..fffcb069f1dc 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -22,14 +22,10 @@ static unsigned long prev_overruns;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
-        int cpu;
-
         overrun_detected = false;
         prev_overruns = 0;
-        tr->time_start = ftrace_now(tr->cpu);
 
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
+        tracing_reset_online_cpus(tr);
 }
 
 static int mmio_trace_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 781d72ef873c..df175cb4564f 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -49,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 }
 
 static void
-probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
         struct trace_array_cpu *data;
         unsigned long flags;
@@ -72,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
         local_irq_restore(flags);
 }
 
-static void sched_switch_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static int tracing_sched_register(void)
 {
         int ret;
@@ -197,7 +187,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
 
 static void start_sched_trace(struct trace_array *tr)
 {
-        sched_switch_reset(tr);
+        tracing_reset_online_cpus(tr);
         tracing_start_sched_switch_record();
 }
 
@@ -221,7 +211,7 @@ static void sched_switch_trace_reset(struct trace_array *tr)
 
 static void sched_switch_trace_start(struct trace_array *tr)
 {
-        sched_switch_reset(tr);
+        tracing_reset_online_cpus(tr);
         tracing_start_sched_switch();
 }
 
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0067b49746c1..43586b689e31 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -211,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
 }
 
 static void
-probe_wakeup(struct rq *rq, struct task_struct *p)
+probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 {
         int cpu = smp_processor_id();
         unsigned long flags;
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 54960edb96d0..01becf1f19ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -234,20 +234,10 @@ static void stop_stack_timers(void)
                 stop_stack_timer(cpu);
 }
 
-static void stack_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static void start_stack_trace(struct trace_array *tr)
 {
         mutex_lock(&sample_timer_lock);
-        stack_reset(tr);
+        tracing_reset_online_cpus(tr);
         start_stack_timers();
         tracer_enabled = 1;
         mutex_unlock(&sample_timer_lock);
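
Taken together, every tracer above drops its open-coded per-CPU reset loop in favor of the new shared helper. Because tracing_reset_online_cpus() takes only a struct trace_array *, it matches the struct tracer .reset callback and can be assigned directly, as the boot tracer hunk shows. A minimal sketch of a tracer wired up this way; example_tracer and example_trace_init are illustrative names, not part of this patch:

#include "trace.h"

/* hypothetical tracer, shown only to illustrate the consolidated reset path */
static int example_trace_init(struct trace_array *tr)
{
        /* start from a clean buffer on every online CPU */
        tracing_reset_online_cpus(tr);
        return 0;
}

static struct tracer example_tracer __read_mostly =
{
        .name           = "example",
        .init           = example_trace_init,
        /* the shared helper can serve as the .reset callback directly */
        .reset          = tracing_reset_online_cpus,
};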