-rw-r--r--  drivers/char/sysrq.c               18
-rw-r--r--  kernel/sysctl.c                     2
-rw-r--r--  kernel/trace/ring_buffer.c         27
-rw-r--r--  kernel/trace/trace.c               48
-rw-r--r--  kernel/trace/trace.h               49
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  13
-rw-r--r--  kernel/trace/trace_stack.c          8
7 files changed, 121 insertions(+), 44 deletions(-)
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index ce0d9da52a8a..94966edfb44d 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -274,6 +274,22 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
 	.enable_mask	= SYSRQ_ENABLE_DUMP,
 };
 
+#ifdef CONFIG_TRACING
+#include <linux/ftrace.h>
+
+static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
+{
+	ftrace_dump();
+}
+static struct sysrq_key_op sysrq_ftrace_dump_op = {
+	.handler	= sysrq_ftrace_dump,
+	.help_msg	= "dumpZ-ftrace-buffer",
+	.action_msg	= "Dump ftrace buffer",
+	.enable_mask	= SYSRQ_ENABLE_DUMP,
+};
+#else
+#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0)
+#endif
 
 static void sysrq_handle_showmem(int key, struct tty_struct *tty)
 {
@@ -406,7 +422,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
 	NULL,				/* x */
 	/* y: May be registered on sparc64 for global register dump */
 	NULL,				/* y */
-	NULL				/* z */
+	&sysrq_ftrace_dump_op,		/* z */
 };
 
 /* key2index calculation, -1 on invalid index */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6b6b727258b5..65d4a9ba79e4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -487,7 +487,7 @@ static struct ctl_table kern_table[] = {
 #ifdef CONFIG_TRACING
 	{
 		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "ftrace_dump_on_opps",
+		.procname	= "ftrace_dump_on_oops",
 		.data		= &ftrace_dump_on_oops,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cedf4e268285..151f6a748676 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,6 +16,8 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
@@ -1122,8 +1124,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1154,10 +1155,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 
  out:
-	if (resched)
-		preempt_enable_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 	return NULL;
 }
 
@@ -1199,12 +1197,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
-	if (preempt_count() == 1) {
-		if (per_cpu(rb_need_resched, cpu))
-			preempt_enable_no_resched_notrace();
-		else
-			preempt_enable_notrace();
-	} else
+	if (preempt_count() == 1)
+		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+	else
 		preempt_enable_no_resched_notrace();
 
 	return 0;
@@ -1237,8 +1232,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (atomic_read(&buffer->record_disabled))
 		return -EBUSY;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1264,10 +1258,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 
 	return ret;
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 50d7018163f6..29ab40a764c8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -244,6 +244,7 @@ static const char *trace_options[] = {
244 "stacktrace", 244 "stacktrace",
245 "sched-tree", 245 "sched-tree",
246 "ftrace_printk", 246 "ftrace_printk",
247 "ftrace_preempt",
247 NULL 248 NULL
248}; 249};
249 250
@@ -891,7 +892,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 
 #ifdef CONFIG_FUNCTION_TRACER
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -904,8 +905,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -915,10 +915,38 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	trace_function(tr, data, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -929,6 +957,12 @@ static struct ftrace_ops trace_ops __read_mostly =
 void tracing_start_function_trace(void)
 {
 	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
 	register_ftrace_function(&trace_ops);
 	if (tracer_enabled)
 		ftrace_function_enabled = 1;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9911277b268b..cc14a6bc1094 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -416,8 +416,57 @@ enum trace_iterator_flags {
 	TRACE_ITER_STACKTRACE		= 0x100,
 	TRACE_ITER_SCHED_TREE		= 0x200,
 	TRACE_ITER_PRINTK		= 0x400,
+	TRACE_ITER_PREEMPTONLY		= 0x800,
 };
 
 extern struct tracer nop_trace;
 
+/**
+ * ftrace_preempt_disable - disable preemption scheduler safe
+ *
+ * When tracing can happen inside the scheduler, there are
+ * cases where the tracing might happen before the need_resched
+ * flag is checked. If this happens and the tracer calls
+ * preempt_enable (after a disable), a schedule might take place
+ * causing an infinite recursion.
+ *
+ * To prevent this, we read the need_resched flag before
+ * disabling preemption. When we want to enable preemption we
+ * check the flag; if it is set, we call preempt_enable_no_resched.
+ * Otherwise, we call preempt_enable.
+ *
+ * The rationale for doing the above is that if need_resched is set
+ * and we have yet to reschedule, we are either in an atomic location
+ * (where we do not need to check for scheduling) or we are inside
+ * the scheduler and do not want to resched.
+ */
+static inline int ftrace_preempt_disable(void)
+{
+	int resched;
+
+	resched = need_resched();
+	preempt_disable_notrace();
+
+	return resched;
+}
+
+/**
+ * ftrace_preempt_enable - enable preemption scheduler safe
+ * @resched: the return value from ftrace_preempt_disable
+ *
+ * This is a scheduler safe way to enable preemption and not miss
+ * any preemption checks. The earlier disable saved the state of preemption.
+ * If resched is set, then we were either inside an atomic or
+ * are inside the scheduler (we would have already scheduled
+ * otherwise). In this case, we do not want to call normal
+ * preempt_enable, but preempt_enable_no_resched instead.
+ */
+static inline void ftrace_preempt_enable(int resched)
+{
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
+}
+
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3ae93f16b565..7bc4abf6fca8 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  out:
 	atomic_dec(&data->disabled);
 
-	/*
-	 * To prevent recursion from the scheduler, if the
-	 * resched flag was set before we entered, then
-	 * don't reschedule.
-	 */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index be682b62fe58..d39e8b7de6a2 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -107,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
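
For context, a minimal usage sketch of the ftrace_preempt_disable()/ftrace_preempt_enable() pair added to kernel/trace/trace.h above. It is not part of the patch; the callback name my_tracer_call and the elided event recording are hypothetical. It only mirrors the pattern the converted callers (function tracer, wakeup tracer, stack tracer, ring buffer) follow: sample need_resched() before disabling preemption, and on the enable side skip the resched check if the flag was already set, so a tracer invoked from inside the scheduler cannot recurse into schedule().

/*
 * Illustrative sketch only (not part of the patch): a hypothetical
 * tracer callback bracketing its work with the helpers added to
 * kernel/trace/trace.h above.
 */
#include <linux/ftrace.h>
#include "trace.h"	/* ftrace_preempt_disable()/ftrace_preempt_enable() */

static void my_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	/* Save need_resched() and disable preemption without tracing. */
	resched = ftrace_preempt_disable();

	/* ... record the event for this CPU here ... */

	/*
	 * If need_resched() was already set on entry we are either in an
	 * atomic context or inside the scheduler itself, so re-enable
	 * preemption without triggering a reschedule; otherwise do the
	 * normal preempt_enable path.
	 */
	ftrace_preempt_enable(resched);
}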