 kernel/lockdep.c            |  1 +
 kernel/trace/trace.c        | 12 ++++++------
 kernel/trace/trace_branch.c |  4 ++--
 kernel/trace/trace_stack.c  |  8 ++++----
 4 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 46a404173db2..74b1878b8bb8 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -25,6 +25,7 @@
  * Thanks to Arjan van de Ven for coming up with the initial idea of
  * mapping lock dependencies runtime.
  */
+#define DISABLE_BRANCH_PROFILING
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 91887a280ab9..380de630ebce 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1209,7 +1209,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -1218,7 +1218,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
 		__trace_graph_entry(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1230,7 +1230,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 	int cpu;
 	int pc;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -1239,7 +1239,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -2645,7 +2645,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		goto err_unlock;
 
-	raw_local_irq_disable();
+	local_irq_disable();
 	__raw_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
@@ -2662,7 +2662,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		}
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
-	raw_local_irq_enable();
+	local_irq_enable();
 
 	tracing_cpumask = tracing_cpumask_new;
 
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index bc972753568d..6c00feb3bac7 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -42,7 +42,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	if (unlikely(!tr))
 		return;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
@@ -74,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 static inline
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index fde3be15c642..06a16115be0f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -48,7 +48,7 @@ static inline void check_stack(void)
 	if (!object_is_on_stack(&this_size))
 		return;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 
 	/* a race could have already updated it */
@@ -96,7 +96,7 @@ static inline void check_stack(void)
 
  out:
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 static void
@@ -162,11 +162,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	if (ret < 0)
 		return ret;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 	*ptr = val;
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	return count;
 }