author		Steven Rostedt <rostedt@goodmis.org>	2008-07-30 22:36:46 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-10-14 04:35:26 -0400
commit		3f5a54e371ca20b119b73704f6c01b71295c1714
tree		1453315564a19e967f11e612e264d4d2989afbe5	/kernel/trace/trace.c
parent		2f2c99dba2398ef7d9c21f7c793180a50e68b1f0
ftrace: dump out ftrace buffers to console on panic
At OLS there was a lot of interest in being able to have the ftrace buffers
dumped on panic. Usually one would expect to use kexec and examine
the buffers after the new kernel is loaded. But sometimes the resources
do not permit kdump and kexec, so having an option to still see the
sequence of events up to the crash is very advantageous.
This patch adds the option to have the ftrace buffers dumped to the
console in the latency_trace format on a panic. The default number of
entries per CPU buffer is lowered to 16384, since writing to the serial
port (if that is the console) would otherwise take an awfully long time.
[
Changes since -v1:
Got alpine to send correctly (and got spell check working).
Removed config option.
Moved the static variables into ftrace_dump itself.
Gave printk a log level.
]
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
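[ Editor's note: the hook used here is the kernel's standard notifier-chain
mechanism; the patch below registers one callback on the atomic
panic_notifier_list and another on the die chain, so ftrace_dump() runs on
both panic() and oops. As a minimal out-of-tree sketch of the same
registration pattern (hypothetical module and names, API as of this kernel
era), it looks like this:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

/*
 * Hypothetical example module: run a callback on panic, using the
 * same registration the patch performs for ftrace_dump().  Panic
 * notifiers run in atomic context, so the callback must not sleep.
 */
static int example_panic_notify(struct notifier_block *this,
				unsigned long event, void *unused)
{
	printk(KERN_INFO "example: panic notifier called\n");
	return NOTIFY_OK;	/* let the rest of the chain run */
}

static struct notifier_block example_panic_block = {
	.notifier_call	= example_panic_notify,
	.priority	= 150,	/* higher priority runs earlier */
};

static int __init example_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &example_panic_block);
	return 0;
}

static void __exit example_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &example_panic_block);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
]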
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	175
1 file changed, 174 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2597e7e49c3..97513c8ecd6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -14,6 +14,7 @@
 #include <linux/utsrelease.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/notifier.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -22,6 +23,7 @@
 #include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/kdebug.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
@@ -103,8 +105,15 @@ int ftrace_function_enabled;
  * trace_nr_entries is the number of entries that is allocated
  * for a buffer. Note, the number of entries is always rounded
  * to ENTRIES_PER_PAGE.
+ *
+ * This number is purposely set to a low number of 16384.
+ * If the dump on oops happens, it will be much appreciated
+ * to not have to wait for all that output. Anyway this can be
+ * boot time and run time configurable.
  */
-static unsigned long trace_nr_entries = 65536UL;
+#define TRACE_ENTRIES_DEFAULT	16384UL
+
+static unsigned long trace_nr_entries = TRACE_ENTRIES_DEFAULT;
 
 /* trace_types holds a link list of available tracers. */
 static struct tracer *trace_types __read_mostly;
@@ -3142,6 +3151,165 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
 }
 EXPORT_SYMBOL_GPL(__ftrace_printk);
 
+static int trace_panic_handler(struct notifier_block *this,
+			       unsigned long event, void *unused)
+{
+	ftrace_dump();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block trace_panic_notifier = {
+	.notifier_call	= trace_panic_handler,
+	.next		= NULL,
+	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
+};
+
+static int trace_die_handler(struct notifier_block *self,
+			     unsigned long val,
+			     void *data)
+{
+	switch (val) {
+	case DIE_OOPS:
+		ftrace_dump();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block trace_die_notifier = {
+	.notifier_call = trace_die_handler,
+	.priority = 200
+};
+
+/*
+ * printk is set to max of 1024, we really don't need it that big.
+ * Nothing should be printing 1000 characters anyway.
+ */
+#define TRACE_MAX_PRINT		1000
+
+/*
+ * Define here KERN_TRACE so that we have one place to modify
+ * it if we decide to change what log level the ftrace dump
+ * should be at.
+ */
+#define KERN_TRACE		KERN_INFO
+
+static void
+trace_printk_seq(struct trace_seq *s)
+{
+	/* Probably should print a warning here. */
+	if (s->len >= 1000)
+		s->len = 1000;
+
+	/* should be zero ended, but we are paranoid. */
+	s->buffer[s->len] = 0;
+
+	printk(KERN_TRACE "%s", s->buffer);
+
+	trace_seq_reset(s);
+}
+
+
+void ftrace_dump(void)
+{
+	static DEFINE_SPINLOCK(ftrace_dump_lock);
+	/* use static because iter can be a bit big for the stack */
+	static struct trace_iterator iter;
+	struct trace_array_cpu *data;
+	static cpumask_t mask;
+	static int dump_ran;
+	unsigned long flags;
+	int cnt = 0;
+	int cpu;
+
+	/* only one dump */
+	spin_lock_irqsave(&ftrace_dump_lock, flags);
+	if (dump_ran)
+		goto out;
+
+	dump_ran = 1;
+
+	/* No turning back! */
+	ftrace_kill_atomic();
+
+	printk(KERN_TRACE "Dumping ftrace buffer:\n");
+
+	iter.tr = &global_trace;
+	iter.trace = current_trace;
+
+	/*
+	 * We need to stop all tracing on all CPUS to read the
+	 * the next buffer. This is a bit expensive, but is
+	 * not done often. We fill all what we can read,
+	 * and then release the locks again.
+	 */
+
+	cpus_clear(mask);
+
+	for_each_tracing_cpu(cpu) {
+		data = iter.tr->data[cpu];
+
+		if (!head_page(data) || !data->trace_idx)
+			continue;
+
+		atomic_inc(&data->disabled);
+		cpu_set(cpu, mask);
+	}
+
+	for_each_cpu_mask(cpu, mask) {
+		data = iter.tr->data[cpu];
+		__raw_spin_lock(&data->lock);
+
+		if (data->overrun > iter.last_overrun[cpu])
+			iter.overrun[cpu] +=
+				data->overrun - iter.last_overrun[cpu];
+		iter.last_overrun[cpu] = data->overrun;
+	}
+
+	while (!trace_empty(&iter)) {
+
+		if (!cnt)
+			printk(KERN_TRACE "---------------------------------\n");
+
+		cnt++;
+
+		/* reset all but tr, trace, and overruns */
+		memset(&iter.seq, 0,
+		       sizeof(struct trace_iterator) -
+		       offsetof(struct trace_iterator, seq));
+		iter.iter_flags |= TRACE_FILE_LAT_FMT;
+		iter.pos = -1;
+
+		if (find_next_entry_inc(&iter) != NULL) {
+			print_trace_line(&iter);
+			trace_consume(&iter);
+		}
+
+		trace_printk_seq(&iter.seq);
+	}
+
+	if (!cnt)
+		printk(KERN_TRACE "   (ftrace buffer empty)\n");
+	else
+		printk(KERN_TRACE "---------------------------------\n");
+
+	for_each_cpu_mask(cpu, mask) {
+		data = iter.tr->data[cpu];
+		__raw_spin_unlock(&data->lock);
+	}
+
+	for_each_cpu_mask(cpu, mask) {
+		data = iter.tr->data[cpu];
+		atomic_dec(&data->disabled);
+	}
+
+
+ out:
+	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
+}
+
 static int trace_alloc_page(void)
 {
 	struct trace_array_cpu *data;
@@ -3338,6 +3506,11 @@ __init static int tracer_alloc_buffers(void)
 	global_trace.ctrl = tracer_enabled;
 	tracing_disabled = 0;
 
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &trace_panic_notifier);
+
+	register_die_notifier(&trace_die_notifier);
+
 	return 0;
 
 free_buffers:
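[ Editor's note: two implementation details in ftrace_dump() are worth
calling out. First, the 16384-entry default is only a default: as the new
comment above trace_nr_entries says, the per-CPU buffer size remains boot-
and run-time configurable (at the time, via the trace_entries= boot
parameter and the matching file in the tracing debugfs directory). Second,
the dump loop reuses a single static trace_iterator and on each pass clears
only the fields from seq onward, so tr, trace, and the overrun bookkeeping
survive between entries. A standalone sketch of that partial-memset idiom,
with a hypothetical struct standing in for trace_iterator:

#include <stddef.h>
#include <string.h>

/*
 * Hypothetical struct mirroring the trick: fields that must survive
 * the reset are declared before 'seq'; everything from 'seq' to the
 * end of the struct is zeroed in one memset().
 */
struct iter_like {
	void		*tr;		/* preserved across resets */
	void		*trace;		/* preserved across resets */
	unsigned long	overrun;	/* preserved across resets */
	char		seq[128];	/* cleared on every pass */
	long		pos;		/* cleared on every pass */
};

static void reset_tail(struct iter_like *it)
{
	/* zero 'seq' and every field after it, leaving the rest intact */
	memset(&it->seq, 0,
	       sizeof(struct iter_like) - offsetof(struct iter_like, seq));
}
]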