-rw-r--r--  include/linux/ftrace.h |   2
-rw-r--r--  kernel/trace/trace.c   | 175
2 files changed, 176 insertions(+), 1 deletion(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 018af16bce5c..f7fb92045bf0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -178,6 +178,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
 extern int
 __ftrace_printk(unsigned long ip, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
+extern void ftrace_dump(void);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -186,6 +187,7 @@ ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)))
 {
 	return 0;
 }
+static inline void ftrace_dump(void) { }
 #endif
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
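
The declaration/stub pairing above is what lets callers use ftrace_dump()
unconditionally: with tracing configured out it compiles away to nothing, so
no call site needs its own #ifdef. A minimal sketch of a hypothetical call
site (the surrounding function and its name are illustrative, not part of
this patch):

    #include <linux/kernel.h>
    #include <linux/ftrace.h>

    static void my_subsys_fatal_error(void)	/* hypothetical */
    {
            /* Get the trace buffer onto the console before dying. */
            ftrace_dump();
            BUG();
    }
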
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2597e7e49c35..97513c8ecd67 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -14,6 +14,7 @@
 #include <linux/utsrelease.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/notifier.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -22,6 +23,7 @@
 #include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/kdebug.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
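
For orientation (not part of the patch): <linux/notifier.h> supplies
struct notifier_block, NOTIFY_OK, and atomic_notifier_chain_register()
used further down, while <linux/kdebug.h> supplies register_die_notifier()
and the DIE_OOPS event value the die handler switches on. The basic
pattern, as a sketch with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/notifier.h>

    static int my_panic_cb(struct notifier_block *nb,	/* hypothetical */
                           unsigned long event, void *unused)
    {
            /* Runs on the panic path: keep it short, never sleep. */
            return NOTIFY_OK;
    }

    static struct notifier_block my_panic_nb = {
            .notifier_call = my_panic_cb,
    };

    /* in some init path: */
    /* atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb); */
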
@@ -103,8 +105,15 @@ int ftrace_function_enabled;
  * trace_nr_entries is the number of entries that is allocated
  * for a buffer. Note, the number of entries is always rounded
  * to ENTRIES_PER_PAGE.
+ *
+ * This number is purposely set to a low value, 16384: if a dump
+ * on oops happens, nobody wants to wait for all that output to
+ * scroll by. It can be made boot-time and run-time configurable
+ * later.
  */
-static unsigned long trace_nr_entries = 65536UL;
+#define TRACE_ENTRIES_DEFAULT	16384UL
+
+static unsigned long trace_nr_entries = TRACE_ENTRIES_DEFAULT;
 
 /* trace_types holds a link list of available tracers. */
 static struct tracer *trace_types __read_mostly;
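
The new comment promises boot-time configurability, which this patch itself
does not add. A sketch of what it could look like inside trace.c using
__setup() (the parameter name here is hypothetical):

    static int __init set_trace_entries(char *str)	/* hypothetical */
    {
            unsigned long nr = simple_strtoul(str, NULL, 0);

            if (nr)
                    trace_nr_entries = nr;
            return 1;
    }
    __setup("trace_entries=", set_trace_entries);
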
@@ -3142,6 +3151,165 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
 }
 EXPORT_SYMBOL_GPL(__ftrace_printk);
 
+static int trace_panic_handler(struct notifier_block *this,
+			       unsigned long event, void *unused)
+{
+	ftrace_dump();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block trace_panic_notifier = {
+	.notifier_call = trace_panic_handler,
+	.next = NULL,
+	.priority = 150	/* priority: INT_MAX >= x >= 0 */
+};
+
+static int trace_die_handler(struct notifier_block *self,
+			     unsigned long val,
+			     void *data)
+{
+	switch (val) {
+	case DIE_OOPS:
+		ftrace_dump();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block trace_die_notifier = {
+	.notifier_call = trace_die_handler,
+	.priority = 200
+};
+
+/*
+ * printk() is limited to a maximum of 1024 characters anyway;
+ * we really don't need it that big. Nothing should be printing
+ * 1000 characters per line anyway.
+ */
+#define TRACE_MAX_PRINT		1000
+
+/*
+ * Define here KERN_TRACE so that we have one place to modify
+ * it if we decide to change what log level the ftrace dump
+ * should be at.
+ */
+#define KERN_TRACE		KERN_INFO
+
+static void
+trace_printk_seq(struct trace_seq *s)
+{
+	/* Probably should print a warning here. */
+	if (s->len >= TRACE_MAX_PRINT)
+		s->len = TRACE_MAX_PRINT;
+
+	/* Should be NUL-terminated, but we are paranoid. */
+	s->buffer[s->len] = 0;
+
+	printk(KERN_TRACE "%s", s->buffer);
+
+	trace_seq_reset(s);
+}
+
+void ftrace_dump(void)
+{
+	static DEFINE_SPINLOCK(ftrace_dump_lock);
+	/* use static because iter can be a bit big for the stack */
+	static struct trace_iterator iter;
+	struct trace_array_cpu *data;
+	static cpumask_t mask;
+	static int dump_ran;
+	unsigned long flags;
+	int cnt = 0;
+	int cpu;
+
+	/* only one dump */
+	spin_lock_irqsave(&ftrace_dump_lock, flags);
+	if (dump_ran)
+		goto out;
+
+	dump_ran = 1;
+
+	/* No turning back! */
+	ftrace_kill_atomic();
+
+	printk(KERN_TRACE "Dumping ftrace buffer:\n");
+
+	iter.tr = &global_trace;
+	iter.trace = current_trace;
+
+	/*
+	 * We need to stop all tracing on all CPUs to read
+	 * the next buffer. This is a bit expensive, but is
+	 * not done often. We fill all that we can read,
+	 * and then release the locks again.
+	 */
+
+	cpus_clear(mask);
+
+	for_each_tracing_cpu(cpu) {
+		data = iter.tr->data[cpu];
+
+		/* skip CPUs whose buffer holds no entries */
+		if (!head_page(data) || !data->trace_idx)
+			continue;
+
+		atomic_inc(&data->disabled);
+		cpu_set(cpu, mask);
+	}
+
+	for_each_cpu_mask(cpu, mask) {
+		data = iter.tr->data[cpu];
+		__raw_spin_lock(&data->lock);
+
+		/* pick up any buffer overruns since the last read */
+		if (data->overrun > iter.last_overrun[cpu])
+			iter.overrun[cpu] +=
+				data->overrun - iter.last_overrun[cpu];
+		iter.last_overrun[cpu] = data->overrun;
+	}
+
+	while (!trace_empty(&iter)) {
+
+		if (!cnt)
+			printk(KERN_TRACE "---------------------------------\n");
+
+		cnt++;
+
+		/* reset all but tr, trace, and overruns */
+		memset(&iter.seq, 0,
+		       sizeof(struct trace_iterator) -
+		       offsetof(struct trace_iterator, seq));
+		iter.iter_flags |= TRACE_FILE_LAT_FMT;
+		iter.pos = -1;
+
+		if (find_next_entry_inc(&iter) != NULL) {
+			print_trace_line(&iter);
+			trace_consume(&iter);
+		}
+
+		trace_printk_seq(&iter.seq);
+	}
+
+	if (!cnt)
+		printk(KERN_TRACE "   (ftrace buffer empty)\n");
+	else
+		printk(KERN_TRACE "---------------------------------\n");
+
+	for_each_cpu_mask(cpu, mask) {
+		data = iter.tr->data[cpu];
+		__raw_spin_unlock(&data->lock);
+	}
+
+	for_each_cpu_mask(cpu, mask) {
+		data = iter.tr->data[cpu];
+		atomic_dec(&data->disabled);
+	}
+
+ out:
+	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
+}
+
 static int trace_alloc_page(void)
 {
 	struct trace_array_cpu *data;
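
One way to exercise the whole path above (a throwaway test module, purely
illustrative and not part of this patch): loading it takes a deliberate
oops, after which the ftrace buffer should follow the usual oops report on
the console via the die notifier.

    #include <linux/module.h>
    #include <linux/ftrace.h>

    static int __init oops_test_init(void)
    {
            ftrace_printk("about to oops\n");
            *(volatile int *)0 = 0;	/* deliberate NULL dereference */
            return 0;
    }
    module_init(oops_test_init);
    MODULE_LICENSE("GPL");
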
@@ -3338,6 +3506,11 @@ __init static int tracer_alloc_buffers(void)
 	global_trace.ctrl = tracer_enabled;
 	tracing_disabled = 0;
 
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &trace_panic_notifier);
+
+	register_die_notifier(&trace_die_notifier);
+
 	return 0;
 
  free_buffers:
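
The notifiers are registered once in tracer_alloc_buffers() and never
unregistered, which is fine for built-in code that lives as long as the
kernel does. If this ever moved into a module, the symmetric teardown
would be (sketch):

    atomic_notifier_chain_unregister(&panic_notifier_list,
                                     &trace_panic_notifier);
    unregister_die_notifier(&trace_die_notifier);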