summaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
authorAndy Lutomirski <luto@kernel.org>2016-05-26 15:00:33 -0400
committerSteven Rostedt <rostedt@goodmis.org>2016-06-20 09:54:20 -0400
commite2ace001176dc9745a472fe8bda1f0b28a4d7351 (patch)
treeb278062869848c20cd6075696ee8271a4b09de62 /kernel/trace/trace.c
parent35abb67de744b5dbaec54381f2f9e0246089331d (diff)
tracing: Choose static tp_printk buffer by explicit nesting count
Currently, the trace_printk code chooses which static buffer to use based on what type of atomic context (NMI, IRQ, etc) it's in. Simplify the code and make it more robust: simply count the nesting depth and choose a buffer based on the current nesting depth. The new code will only drop an event if we nest more than 4 deep, and the old code was guaranteed to malfunction if that happened. Link: http://lkml.kernel.org/r/07ab03aecfba25fcce8f9a211b14c9c5e2865c58.1464289095.git.luto@kernel.org Acked-by: Namhyung Kim <namhyung@kernel.org> Signed-off-by: Andy Lutomirski <luto@kernel.org> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--kernel/trace/trace.c83
1 file changed, 24 insertions, 59 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index aa240551fc5d..45e6747589c6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2339,83 +2339,41 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2339 2339
2340/* created for use with alloc_percpu */ 2340/* created for use with alloc_percpu */
2341struct trace_buffer_struct { 2341struct trace_buffer_struct {
2342 char buffer[TRACE_BUF_SIZE]; 2342 int nesting;
2343 char buffer[4][TRACE_BUF_SIZE];
2343}; 2344};
2344 2345
2345static struct trace_buffer_struct *trace_percpu_buffer; 2346static struct trace_buffer_struct *trace_percpu_buffer;
2346static struct trace_buffer_struct *trace_percpu_sirq_buffer;
2347static struct trace_buffer_struct *trace_percpu_irq_buffer;
2348static struct trace_buffer_struct *trace_percpu_nmi_buffer;
2349 2347
2350/* 2348/*
2351 * The buffer used is dependent on the context. There is a per cpu 2349 * Thise allows for lockless recording. If we're nested too deeply, then
2352 * buffer for normal context, softirq contex, hard irq context and 2350 * this returns NULL.
2353 * for NMI context. Thise allows for lockless recording.
2354 *
2355 * Note, if the buffers failed to be allocated, then this returns NULL
2356 */ 2351 */
2357static char *get_trace_buf(void) 2352static char *get_trace_buf(void)
2358{ 2353{
2359 struct trace_buffer_struct *percpu_buffer; 2354 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2360
2361 /*
2362 * If we have allocated per cpu buffers, then we do not
2363 * need to do any locking.
2364 */
2365 if (in_nmi())
2366 percpu_buffer = trace_percpu_nmi_buffer;
2367 else if (in_irq())
2368 percpu_buffer = trace_percpu_irq_buffer;
2369 else if (in_softirq())
2370 percpu_buffer = trace_percpu_sirq_buffer;
2371 else
2372 percpu_buffer = trace_percpu_buffer;
2373 2355
2374 if (!percpu_buffer) 2356 if (!buffer || buffer->nesting >= 4)
2375 return NULL; 2357 return NULL;
2376 2358
2377 return this_cpu_ptr(&percpu_buffer->buffer[0]); 2359 return &buffer->buffer[buffer->nesting++][0];
2360}
2361
2362static void put_trace_buf(void)
2363{
2364 this_cpu_dec(trace_percpu_buffer->nesting);
2378} 2365}
2379 2366
2380static int alloc_percpu_trace_buffer(void) 2367static int alloc_percpu_trace_buffer(void)
2381{ 2368{
2382 struct trace_buffer_struct *buffers; 2369 struct trace_buffer_struct *buffers;
2383 struct trace_buffer_struct *sirq_buffers;
2384 struct trace_buffer_struct *irq_buffers;
2385 struct trace_buffer_struct *nmi_buffers;
2386 2370
2387 buffers = alloc_percpu(struct trace_buffer_struct); 2371 buffers = alloc_percpu(struct trace_buffer_struct);
2388 if (!buffers) 2372 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2389 goto err_warn; 2373 return -ENOMEM;
2390
2391 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2392 if (!sirq_buffers)
2393 goto err_sirq;
2394
2395 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2396 if (!irq_buffers)
2397 goto err_irq;
2398
2399 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2400 if (!nmi_buffers)
2401 goto err_nmi;
2402 2374
2403 trace_percpu_buffer = buffers; 2375 trace_percpu_buffer = buffers;
2404 trace_percpu_sirq_buffer = sirq_buffers;
2405 trace_percpu_irq_buffer = irq_buffers;
2406 trace_percpu_nmi_buffer = nmi_buffers;
2407
2408 return 0; 2376 return 0;
2409
2410 err_nmi:
2411 free_percpu(irq_buffers);
2412 err_irq:
2413 free_percpu(sirq_buffers);
2414 err_sirq:
2415 free_percpu(buffers);
2416 err_warn:
2417 WARN(1, "Could not allocate percpu trace_printk buffer");
2418 return -ENOMEM;
2419} 2377}
2420 2378
2421static int buffers_allocated; 2379static int buffers_allocated;
@@ -2506,7 +2464,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2506 tbuffer = get_trace_buf(); 2464 tbuffer = get_trace_buf();
2507 if (!tbuffer) { 2465 if (!tbuffer) {
2508 len = 0; 2466 len = 0;
2509 goto out; 2467 goto out_nobuffer;
2510 } 2468 }
2511 2469
2512 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); 2470 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
@@ -2532,6 +2490,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2532 } 2490 }
2533 2491
2534out: 2492out:
2493 put_trace_buf();
2494
2495out_nobuffer:
2535 preempt_enable_notrace(); 2496 preempt_enable_notrace();
2536 unpause_graph_tracing(); 2497 unpause_graph_tracing();
2537 2498
@@ -2563,7 +2524,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
2563 tbuffer = get_trace_buf(); 2524 tbuffer = get_trace_buf();
2564 if (!tbuffer) { 2525 if (!tbuffer) {
2565 len = 0; 2526 len = 0;
2566 goto out; 2527 goto out_nobuffer;
2567 } 2528 }
2568 2529
2569 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); 2530 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
@@ -2582,7 +2543,11 @@ __trace_array_vprintk(struct ring_buffer *buffer,
2582 __buffer_unlock_commit(buffer, event); 2543 __buffer_unlock_commit(buffer, event);
2583 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL); 2544 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2584 } 2545 }
2585 out: 2546
2547out:
2548 put_trace_buf();
2549
2550out_nobuffer:
2586 preempt_enable_notrace(); 2551 preempt_enable_notrace();
2587 unpause_graph_tracing(); 2552 unpause_graph_tracing();
2588 2553