Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/ftrace.c                 |  11
-rw-r--r--   kernel/trace/ring_buffer.c            |   9
-rw-r--r--   kernel/trace/ring_buffer_benchmark.c  |  85
-rw-r--r--   kernel/trace/trace.c                  |  41
-rw-r--r--   kernel/trace/trace_clock.c            |   8
-rw-r--r--   kernel/trace/trace_export.c           |   4
6 files changed, 106 insertions, 52 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7cb6f1922598..e51a1bcb7bed 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2274,7 +2274,6 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
-static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
 
 static int __init set_ftrace_notrace(char *str)
 {
@@ -2291,6 +2290,7 @@ static int __init set_ftrace_filter(char *str)
 __setup("ftrace_filter=", set_ftrace_filter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
 static int __init set_graph_function(char *str)
 {
 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -2985,7 +2985,7 @@ static ssize_t
 ftrace_pid_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
-	char buf[64];
+	char buf[64], *tmp;
 	long val;
 	int ret;
 
@@ -3001,11 +3001,11 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
 	 * to clean the filter quietly.
 	 */
-	strstrip(buf);
-	if (strlen(buf) == 0)
+	tmp = strstrip(buf);
+	if (strlen(tmp) == 0)
 		return 1;
 
-	ret = strict_strtol(buf, 10, &val);
+	ret = strict_strtol(tmp, 10, &val);
 	if (ret < 0)
 		return ret;
 
@@ -3391,4 +3391,3 @@ void ftrace_graph_stop(void)
 	ftrace_stop();
 }
 #endif
-
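The functional fix above is in ftrace_pid_write(): strstrip() removes trailing whitespace in place, but it skips leading whitespace only through its return value, so that pointer has to be kept for the empty-string check and for strict_strtol(), which does not skip leading spaces itself (strstrip() was also gaining a __must_check annotation around this time, which is likely what prompted the cleanup). The ftrace_graph_buf move simply relocates the buffer into the CONFIG_FUNCTION_GRAPH_TRACER block that uses it. A minimal userspace sketch of the strstrip() pattern; trim() here is a stand-in for the kernel helper, not the kernel function itself:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's strstrip(): trailing whitespace is cut in
 * place, leading whitespace is only skipped via the returned pointer. */
static char *trim(char *s)
{
	size_t len = strlen(s);

	while (len && isspace((unsigned char)s[len - 1]))
		s[--len] = '\0';
	while (isspace((unsigned char)*s))
		s++;
	return s;
}

int main(void)
{
	char buf[64] = "  123  ";
	char *tmp = trim(buf);

	/* buf still starts with spaces; only tmp points at "123".  A strict
	 * parser in the style of strict_strtol() rejects the former, which
	 * is why ftrace_pid_write() now keeps the returned pointer. */
	printf("buf=\"%s\" tmp=\"%s\"\n", buf, tmp);
	return 0;
}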
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index db223fe8887f..a1ca4956ab5e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1790,9 +1790,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1895,13 +1895,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;
 
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 
@@ -1912,7 +1909,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);
 
 	/* We reserved something on the buffer */
 
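The ring_buffer.c change trims the reserve fast path: cpu_buffer->commit_page is only consumed when a write crosses the end of a page, so the load (together with the compiler barrier() that pinned it before the tail update) moves out of __rb_reserve_next() and into the rb_move_tail() slow path. A generic sketch of the idea, with hypothetical names and a plain structure standing in for the ring buffer state:

#include <stdio.h>

/* Hypothetical buffer state, not the real ring_buffer structures. */
struct buf {
	unsigned long tail;
	unsigned long commit;
	unsigned long size;
};

/* Slow path: only here is the commit position actually needed. */
static long move_tail(struct buf *b, unsigned long len)
{
	unsigned long commit = b->commit;	/* loaded only when needed */

	/* ... wrap or drop the write using 'commit' ... */
	(void)commit;
	(void)len;
	return -1;
}

/* Hot path: touches only the tail, no commit load, no barrier. */
static long reserve(struct buf *b, unsigned long len)
{
	unsigned long tail = b->tail;

	if (tail + len > b->size)
		return move_tail(b, len);

	b->tail = tail + len;
	return (long)tail;
}

int main(void)
{
	struct buf b = { .tail = 0, .commit = 0, .size = 4096 };

	printf("reserved at %ld\n", reserve(&b, 16));
	return 0;
}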
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 573d3cc762c3..b2477caf09c2 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -35,6 +35,28 @@ static int disable_reader;
 module_param(disable_reader, uint, 0644);
 MODULE_PARM_DESC(disable_reader, "only run producer");
 
+static int write_iteration = 50;
+module_param(write_iteration, uint, 0644);
+MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
+
+static int producer_nice = 19;
+static int consumer_nice = 19;
+
+static int producer_fifo = -1;
+static int consumer_fifo = -1;
+
+module_param(producer_nice, uint, 0644);
+MODULE_PARM_DESC(producer_nice, "nice prio for producer");
+
+module_param(consumer_nice, uint, 0644);
+MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");
+
+module_param(producer_fifo, uint, 0644);
+MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");
+
+module_param(consumer_fifo, uint, 0644);
+MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
+
 static int read_events;
 
 static int kill_test;
@@ -208,15 +230,18 @@ static void ring_buffer_producer(void)
 	do {
 		struct ring_buffer_event *event;
 		int *entry;
-
-		event = ring_buffer_lock_reserve(buffer, 10);
-		if (!event) {
-			missed++;
-		} else {
-			hit++;
-			entry = ring_buffer_event_data(event);
-			*entry = smp_processor_id();
-			ring_buffer_unlock_commit(buffer, event);
+		int i;
+
+		for (i = 0; i < write_iteration; i++) {
+			event = ring_buffer_lock_reserve(buffer, 10);
+			if (!event) {
+				missed++;
+			} else {
+				hit++;
+				entry = ring_buffer_event_data(event);
+				*entry = smp_processor_id();
+				ring_buffer_unlock_commit(buffer, event);
+			}
 		}
 		do_gettimeofday(&end_tv);
 
@@ -263,6 +288,27 @@ static void ring_buffer_producer(void)
 
 	if (kill_test)
 		trace_printk("ERROR!\n");
+
+	if (!disable_reader) {
+		if (consumer_fifo < 0)
+			trace_printk("Running Consumer at nice: %d\n",
+				     consumer_nice);
+		else
+			trace_printk("Running Consumer at SCHED_FIFO %d\n",
+				     consumer_fifo);
+	}
+	if (producer_fifo < 0)
+		trace_printk("Running Producer at nice: %d\n",
+			     producer_nice);
+	else
+		trace_printk("Running Producer at SCHED_FIFO %d\n",
+			     producer_fifo);
+
+	/* Let the user know that the test is running at low priority */
+	if (producer_fifo < 0 && consumer_fifo < 0 &&
+	    producer_nice == 19 && consumer_nice == 19)
+		trace_printk("WARNING!!! This test is running at lowest priority.\n");
+
 	trace_printk("Time: %lld (usecs)\n", time);
 	trace_printk("Overruns: %lld\n", overruns);
 	if (disable_reader)
@@ -392,6 +438,27 @@ static int __init ring_buffer_benchmark_init(void)
 	if (IS_ERR(producer))
 		goto out_kill;
 
+	/*
+	 * Run them as low-prio background tasks by default:
+	 */
+	if (!disable_reader) {
+		if (consumer_fifo >= 0) {
+			struct sched_param param = {
+				.sched_priority = consumer_fifo
+			};
+			sched_setscheduler(consumer, SCHED_FIFO, &param);
+		} else
+			set_user_nice(consumer, consumer_nice);
+	}
+
+	if (producer_fifo >= 0) {
+		struct sched_param param = {
+			.sched_priority = consumer_fifo
+		};
+		sched_setscheduler(producer, SCHED_FIFO, &param);
+	} else
+		set_user_nice(producer, producer_nice);
+
 	return 0;
 
  out_kill:
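Two things change in the benchmark. First, the producer now batches write_iteration (default 50) reserve/commit pairs per do_gettimeofday() reading, so clock reads no longer dominate the loop being measured. Second, new module parameters let the producer and consumer threads run at a chosen nice level (default +19) or, when a non-negative fifo value is given, at a SCHED_FIFO priority; note that the parameters are declared int with -1 defaults yet registered with the uint module_param type, and that the producer branch of ring_buffer_benchmark_init() fills .sched_priority from consumer_fifo, which looks like a copy-paste slip (producer_fifo was presumably intended). A userspace sketch of the same "FIFO if requested, otherwise nice" decision, with a hypothetical helper name:

#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>

/* Hypothetical helper mirroring the benchmark's priority setup: use
 * SCHED_FIFO when a fifo priority was requested, otherwise fall back
 * to a plain nice value (the benchmark default is nice +19). */
static void set_prio(int fifo, int nice_val)
{
	if (fifo >= 0) {
		struct sched_param param = { .sched_priority = fifo };

		if (sched_setscheduler(0, SCHED_FIFO, &param))
			perror("sched_setscheduler");
	} else if (setpriority(PRIO_PROCESS, 0, nice_val)) {
		perror("setpriority");
	}
}

int main(void)
{
	set_prio(-1, 19);	/* background priority, like the module defaults */
	printf("running at background priority\n");
	return 0;
}

Loading the module with, for example, producer_fifo=10 exercises the real-time path; the defaults keep both threads at the lowest priority, which is what the new WARNING printout calls out.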
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9d3067a62d43..874f2893cff0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1361,10 +1361,11 @@ int trace_array_vprintk(struct trace_array *tr,
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
 	__raw_spin_lock(&trace_buf_lock);
-	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
-	len = min(len, TRACE_BUF_SIZE-1);
-	trace_buf[len] = 0;
+	if (args == NULL) {
+		strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
+		len = strlen(trace_buf);
+	} else
+		len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
@@ -1373,10 +1374,10 @@ int trace_array_vprintk(struct trace_array *tr,
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
 	memcpy(&entry->buf, trace_buf, len);
-	entry->buf[len] = 0;
+	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
@@ -3319,22 +3320,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
-static int mark_printk(const char *fmt, ...)
-{
-	int ret;
-	va_list args;
-	va_start(args, fmt);
-	ret = trace_vprintk(0, fmt, args);
-	va_end(args);
-	return ret;
-}
-
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *fpos)
 {
 	char *buf;
-	char *end;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3342,7 +3332,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
 
-	buf = kmalloc(cnt + 1, GFP_KERNEL);
+	buf = kmalloc(cnt + 2, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
@@ -3350,14 +3340,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 		kfree(buf);
 		return -EFAULT;
 	}
+	if (buf[cnt-1] != '\n') {
+		buf[cnt] = '\n';
+		buf[cnt+1] = '\0';
+	} else
+		buf[cnt] = '\0';
 
-	/* Cut from the first nil or newline. */
-	buf[cnt] = '\0';
-	end = strchr(buf, '\n');
-	if (end)
-		*end = '\0';
-
-	cnt = mark_printk("%s\n", buf);
+	cnt = trace_vprintk(0, buf, NULL);
 	kfree(buf);
 	*fpos += cnt;
 
@@ -3730,7 +3719,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
-		return ENOMEM;
+		return -ENOMEM;
 
 	trace_seq_init(s);
 
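The trace.c changes belong together: mark_printk() is removed, and tracing_mark_write() now hands the user's buffer to trace_vprintk() with a NULL va_list, which trace_array_vprintk() treats as "copy the string verbatim" instead of formatting it, so a stray '%' in user data is never interpreted as a conversion. The kmalloc(cnt + 2) plus the newline fixup guarantees the record ends in '\n' rather than being cut at the first newline as before, and the ENOMEM return in tracing_stats_read() becomes a proper negative errno. One detail worth noting: the old min(len, TRACE_BUF_SIZE-1) clamp is gone even though vsnprintf() returns the length the output would have had, so len can exceed TRACE_BUF_SIZE when a message is truncated. A small userspace sketch of the NULL-means-verbatim convention, with hypothetical names and a va_list pointer for portability (the kernel code compares a plain va_list against NULL, which is not portable C):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 256

/* Hypothetical recorder: a NULL args pointer means "fmt is already user
 * data, copy it verbatim"; anything else goes through vsnprintf(). */
static int record(char *dst, const char *fmt, va_list *args)
{
	if (args == NULL) {
		strncpy(dst, fmt, BUF_SIZE - 1);
		dst[BUF_SIZE - 1] = '\0';
		return (int)strlen(dst);
	}
	return vsnprintf(dst, BUF_SIZE, fmt, *args);
}

int main(void)
{
	char buf[BUF_SIZE];
	int len = record(buf, "100% user data\n", NULL);

	printf("%d bytes: %s", len, buf);	/* the '%' survived untouched */
	return 0;
}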
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 20c5f92e28a8..878c03f386ba 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -20,6 +20,8 @@
 #include <linux/ktime.h>
 #include <linux/trace_clock.h>
 
+#include "trace.h"
+
 /*
  * trace_clock_local(): the simplest and least coherent tracing clock.
  *
@@ -28,17 +30,17 @@
  */
 u64 notrace trace_clock_local(void)
 {
-	unsigned long flags;
 	u64 clock;
+	int resched;
 
 	/*
 	 * sched_clock() is an architecture implemented, fast, scalable,
 	 * lockless clock. It is not guaranteed to be coherent across
 	 * CPUs, nor across CPU idle events.
 	 */
-	raw_local_irq_save(flags);
+	resched = ftrace_preempt_disable();
 	clock = sched_clock();
-	raw_local_irq_restore(flags);
+	ftrace_preempt_enable(resched);
 
 	return clock;
 }
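trace_clock_local() now only disables preemption around sched_clock() instead of disabling interrupts, which is cheaper on the tracing hot path; the new #include "trace.h" is what provides ftrace_preempt_disable()/ftrace_preempt_enable(). Those helpers are not part of this diff; from the kernel/trace/trace.h of this era they look roughly like the sketch below (paraphrased, so check the tree): the notrace preempt variants keep the tracer from recursing into itself, and remembering a pending reschedule avoids kicking the scheduler from inside the tracer.

/* Rough sketch of the trace.h helpers this change relies on; the exact
 * definitions live in kernel/trace/trace.h of the same tree. */
#include <linux/preempt.h>
#include <linux/sched.h>

static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();	/* remember a pending reschedule */
	preempt_disable_notrace();	/* notrace: no recursion into the tracer */
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}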
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 934d81fb4ca4..dff8c84ddf17 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -48,11 +48,11 @@
 struct ____ftrace_##name {					\
 	tstruct							\
 };								\
-static void __used ____ftrace_check_##name(void)		\
+static void __always_unused ____ftrace_check_##name(void)	\
 {								\
 	struct ____ftrace_##name *__entry = NULL;		\
 								\
-	/* force cmpile-time check on F_printk() */		\
+	/* force compile-time check on F_printk() */		\
 	printk(print);						\
 }
 
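The trace_export.c tweak fixes the "cmpile-time" typo and swaps __used for __always_unused on the generated ____ftrace_check_##name() functions: they exist only so the compiler cross-checks the F_printk() format string against the entry layout, so there is no reason to force the symbols into the object file; the unused attribute keeps the check and silences the warning. A small userspace sketch of the same compile-time format-check trick (the sample names are made up):

#include <stdio.h>

struct sample_entry {
	unsigned long ip;
	int depth;
};

/* Never called: it exists only so -Wformat can verify the format string
 * against the field types, mirroring the ____ftrace_check_* functions. */
static void __attribute__((unused)) sample_check(void)
{
	struct sample_entry *__entry = (void *)0;

	printf("%lx %d\n", __entry->ip, __entry->depth);
}

int main(void)
{
	return 0;
}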