Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/Kconfig    |  2 +-
-rw-r--r--   kernel/trace/blktrace.c |  4 ----
-rw-r--r--   kernel/trace/trace.c    | 20 +++++++++++++++++++-
3 files changed, 20 insertions, 6 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e04b8bcdef88..ea37e2ff4164 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -126,7 +126,7 @@ if FTRACE
 config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	depends on HAVE_FUNCTION_TRACER
-	select FRAME_POINTER if (!ARM_UNWIND)
+	select FRAME_POINTER if !ARM_UNWIND && !S390
 	select KALLSYMS
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bc251ed66724..7b8ec0281548 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -168,7 +168,6 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 				 BLK_TC_ACT(BLK_TC_WRITE) };
 
-#define BLK_TC_HARDBARRIER	BLK_TC_BARRIER
 #define BLK_TC_RAHEAD		BLK_TC_AHEAD
 
 /* The ilog2() calls fall out because they're constant */
@@ -196,7 +195,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		return;
 
 	what |= ddir_act[rw & WRITE];
-	what |= MASK_TC_BIT(rw, HARDBARRIER);
 	what |= MASK_TC_BIT(rw, SYNC);
 	what |= MASK_TC_BIT(rw, RAHEAD);
 	what |= MASK_TC_BIT(rw, META);
@@ -1807,8 +1805,6 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 
 	if (rw & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (rw & REQ_HARDBARRIER)
-		rwbs[i++] = 'B';
 	if (rw & REQ_SYNC)
 		rwbs[i++] = 'S';
 	if (rw & REQ_META)
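
Context for the blk_fill_rwbs() hunk above: the function turns a request's flag bits into the letter string shown in blkparse output, and after this patch the barrier letter 'B' is simply never emitted. Below is a minimal user-space sketch of that decoding, limited to the three flags visible in the hunk; the REQ_* values are illustrative placeholders, not the real definitions from <linux/blk_types.h>.

#include <stdio.h>

/* Illustrative placeholder bits; the real values live in <linux/blk_types.h>. */
#define REQ_RAHEAD (1u << 0)
#define REQ_SYNC   (1u << 1)
#define REQ_META   (1u << 2)

/* Mirrors the post-patch flag-to-letter mapping: no 'B' for barriers anymore. */
static void fill_rwbs(char *rwbs, unsigned int rw)
{
	int i = 0;

	if (rw & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (rw & REQ_SYNC)
		rwbs[i++] = 'S';
	if (rw & REQ_META)
		rwbs[i++] = 'M';
	rwbs[i] = '\0';
}

int main(void)
{
	char buf[8];

	fill_rwbs(buf, REQ_SYNC | REQ_META);
	printf("%s\n", buf);	/* prints "SM" */
	return 0;
}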
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82d9b8106cd0..c380612273bf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,7 +17,6 @@
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
-#include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>
@@ -1284,6 +1283,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1303,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
+
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1319,6 +1332,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+	__this_cpu_dec(user_stack_count);
+
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
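
The trace.c hunks above add a per-CPU recursion guard: unwinding a user stack can itself raise kernel events (a page fault while reading the user pages, for instance), which would re-enter ftrace_trace_userstack() on the same CPU and loop. Below is a minimal stand-alone sketch of the same guard pattern, with a __thread counter standing in for the kernel's DEFINE_PER_CPU counter and for preempt_disable()/preempt_enable(); it is an illustration of the idea, not the kernel code itself.

#include <stdio.h>

/*
 * Per-CPU recursion guard, approximated with a thread-local counter.
 * In the kernel, preempt_disable() keeps the task on one CPU so that
 * the __this_cpu_read/inc/dec calls all touch the same counter.
 */
static __thread int user_stack_count;

static void record_user_stack(void);

/* Stand-in for work that may itself trigger another trace event. */
static void unwind_user_stack(void)
{
	record_user_stack();	/* re-entry is caught by the guard below */
}

static void record_user_stack(void)
{
	if (user_stack_count)	/* already tracing on this CPU: bail out */
		return;

	user_stack_count++;
	unwind_user_stack();	/* may recurse; the guard stops the loop */
	user_stack_count--;
}

int main(void)
{
	record_user_stack();
	printf("done, guard value back to %d\n", user_stack_count);
	return 0;
}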
