Diffstat (limited to 'kernel/trace')

 kernel/trace/ring_buffer.c           | 5 +++--
 kernel/trace/ring_buffer_benchmark.c | 1 +
 kernel/trace/trace.c                 | 6 +++---
 kernel/trace/trace.h                 | 2 +-
 kernel/trace/trace_functions_graph.c | 4 ++--
 5 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 54191d6ed195..05a9f83b8819 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
 #include <linux/cpu.h>
 #include <linux/fs.h>
 
+#include <asm/local.h>
 #include "trace.h"
 
 /*
@@ -2541,7 +2542,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2577,7 +2578,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
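The comment corrected above documents that recording disables nest like preempt_disable(): each call to ring_buffer_record_disable() must be balanced by a call to ring_buffer_record_enable() before writes resume. A minimal usage sketch (illustration only, not part of this patch; assumes a caller that already has a struct ring_buffer *buffer):

	ring_buffer_record_disable(buffer);	/* writes to the buffer are now dropped */
	ring_buffer_record_disable(buffer);	/* nested disable */

	ring_buffer_record_enable(buffer);	/* still disabled: enables must match disables */
	ring_buffer_record_enable(buffer);	/* recording is truly enabled again */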
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index b2477caf09c2..df74c7982255 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -8,6 +8,7 @@
 #include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/time.h>
+#include <asm/local.h>
 
 struct rb_page {
 	u64 ts;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e52683f7c3b2..3ec2ee6f6560 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -92,12 +92,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(ftrace_cpu_disabled);
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(ftrace_cpu_disabled);
 	preempt_enable();
 }
 
@@ -1191,7 +1191,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
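The conversion in this file drops the per_cpu_var() wrapper, so the __this_cpu_*() accessors now take the per-CPU variable name directly; the variable itself is declared with DEFINE_PER_CPU(int, ftrace_cpu_disabled), as the hunk header above shows. A minimal sketch of the resulting idiom (illustration only, not part of the patch):

	DEFINE_PER_CPU(int, ftrace_cpu_disabled);

	preempt_disable();
	__this_cpu_inc(ftrace_cpu_disabled);	/* mark tracing disabled on this CPU */
	/* while the count is non-zero, trace_function() bails out early */
	__this_cpu_dec(ftrace_cpu_disabled);	/* re-enable tracing on this CPU */
	preempt_enable();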
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1bc8cd1431d7..2825ef2c0b15 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -551,7 +551,7 @@ static inline int ftrace_trace_task(struct task_struct *task)
  * struct trace_parser - servers for reading the user input separated by spaces
  * @cont: set if the input is not complete - no final space char was found
  * @buffer: holds the parsed user input
- * @idx: user input lenght
+ * @idx: user input length
  * @size: buffer size
  */
 struct trace_parser {
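For reference, the structure that the corrected kernel-doc above describes looks roughly like this (a sketch with assumed field types, not part of the patch):

	struct trace_parser {
		bool		cont;		/* set if the input is not complete */
		char		*buffer;	/* holds the parsed user input */
		unsigned	idx;		/* user input length */
		unsigned	size;		/* buffer size */
	};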
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e9df04b60267..e6989d9b44da 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -188,7 +188,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -255,7 +255,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,