Diffstat (limited to 'kernel')
-rw-r--r--   kernel/trace/ring_buffer.c   | 42
-rw-r--r--   kernel/trace/trace.c         | 60
-rw-r--r--   kernel/trace/trace_sysprof.c | 13
3 files changed, 64 insertions, 51 deletions
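
Note (not part of the commit): every change below follows the same cpumask conversion pattern. A fixed-size cpumask_t embedded in a structure or held in a static becomes a cpumask_var_t, which must be allocated with alloc_cpumask_var() and released with free_cpumask_var(); direct assignment becomes cpumask_copy(), and cpu_isset()/for_each_cpu_mask() become cpumask_test_cpu()/for_each_cpu(). Below is a minimal sketch of that lifecycle; struct foo and foo_alloc()/foo_free() are made-up names, only the <linux/cpumask.h> calls are the real API.

#include <linux/cpumask.h>
#include <linux/slab.h>

struct foo {
	cpumask_var_t cpumask;		/* was: cpumask_t cpumask; */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	/* With CONFIG_CPUMASK_OFFSTACK=y the mask itself is heap-allocated. */
	if (!alloc_cpumask_var(&f->cpumask, GFP_KERNEL)) {
		kfree(f);
		return NULL;
	}

	cpumask_copy(f->cpumask, cpu_possible_mask);
	return f;
}

static void foo_free(struct foo *f)
{
	free_cpumask_var(f->cpumask);	/* free the mask before the struct */
	kfree(f);
}
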
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1d601a7c4587..a9d9760dc7b6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -195,7 +195,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
-	for_each_cpu_mask(cpu, buffer->cpumask)
+	for_each_cpu(cpu, buffer->cpumask)
 
 #define TS_SHIFT	27
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
@@ -267,7 +267,7 @@ struct ring_buffer {
 	unsigned			pages;
 	unsigned			flags;
 	int				cpus;
-	cpumask_t			cpumask;
+	cpumask_var_t			cpumask;
 	atomic_t			record_disabled;
 
 	struct mutex			mutex;
@@ -458,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (!buffer)
 		return NULL;
 
+	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+		goto fail_free_buffer;
+
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
 
@@ -465,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
-	buffer->cpumask = cpu_possible_map;
+	cpumask_copy(buffer->cpumask, cpu_possible_mask);
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
 				  GFP_KERNEL);
 	if (!buffer->buffers)
-		goto fail_free_buffer;
+		goto fail_free_cpumask;
 
 	for_each_buffer_cpu(buffer, cpu) {
 		buffer->buffers[cpu] =
@@ -492,6 +495,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	}
 	kfree(buffer->buffers);
 
+ fail_free_cpumask:
+	free_cpumask_var(buffer->cpumask);
+
  fail_free_buffer:
 	kfree(buffer);
 	return NULL;
@@ -510,6 +516,8 @@ ring_buffer_free(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+	free_cpumask_var(buffer->cpumask);
+
 	kfree(buffer);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free);
@@ -1283,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1396,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1478,7 +1486,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1498,7 +1506,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1515,7 +1523,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1532,7 +1540,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1850,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -2025,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2062,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_iter *iter;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2172,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2228,7 +2236,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -2252,8 +2260,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
 
-	if (!cpu_isset(cpu, buffer_a->cpumask) ||
-	    !cpu_isset(cpu, buffer_b->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
 		return -EINVAL;
 
 	/* At least make sure the two buffers are somewhat the same */
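
Note (not part of the commit): in ring_buffer.c the repeated cpu_isset() guards simply become cpumask_test_cpu(), which already takes a pointer, so no '&' is needed once the mask is a cpumask_var_t. A rough sketch of the guard the per-CPU entry points use is below; struct rb, struct rb_per_cpu and rb_get_entries() are hypothetical stand-ins for the real ring buffer types, only cpumask_test_cpu() is the real API.

#include <linux/cpumask.h>

/* Hypothetical stand-ins for the real ring_buffer structures. */
struct rb_per_cpu {
	unsigned long entries;
};

struct rb {
	cpumask_var_t cpumask;
	struct rb_per_cpu **buffers;
};

static unsigned long rb_get_entries(struct rb *buffer, int cpu)
{
	/* Reject CPUs the buffer was never set up for. */
	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	return buffer->buffers[cpu]->entries;
}
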
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0e91f43b6baf..5d04e27f3b40 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -89,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_t __read_mostly		tracing_buffer_mask;
+static cpumask_var_t __read_mostly	tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)	\
-	for_each_cpu_mask(cpu, tracing_buffer_mask)
+	for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -2646,13 +2646,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2674,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
 	if (count - len < 2) {
 		count = -EINVAL;
 		goto out_err;
@@ -2693,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
 	int err, cpu;
+	cpumask_var_t tracing_cpumask_new;
+
+	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
 
 	mutex_lock(&tracing_cpumask_update_lock);
-	err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 	if (err)
 		goto err_unlock;
 
@@ -2706,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 * Increase/decrease the disabled counter if we are
 		 * about to flip a bit in the cpumask:
 		 */
-		if (cpu_isset(cpu, tracing_cpumask) &&
-				!cpu_isset(cpu, tracing_cpumask_new)) {
+		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&global_trace.data[cpu]->disabled);
 		}
-		if (!cpu_isset(cpu, tracing_cpumask) &&
-				cpu_isset(cpu, tracing_cpumask_new)) {
+		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
-	tracing_cpumask = tracing_cpumask_new;
+	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
 
 err_unlock:
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask);
 
 	return err;
 }
@@ -3752,7 +3752,6 @@ void ftrace_dump(void)
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
-	static cpumask_t mask;
 	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
@@ -3786,8 +3785,6 @@ void ftrace_dump(void)
 	 * and then release the locks again.
 	 */
 
-	cpus_clear(mask);
-
 	while (!trace_empty(&iter)) {
 
 		if (!cnt)
@@ -3823,19 +3820,28 @@ __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
 	int i;
+	int ret = -ENOMEM;
 
-	/* TODO: make the number of buffers hot pluggable with CPUS */
-	tracing_buffer_mask = cpu_possible_map;
+	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+		goto out_free_buffer_mask;
 
+	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+	cpumask_copy(tracing_cpumask, cpu_all_mask);
+
+	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
 						   TRACE_BUFFER_FLAGS);
 	if (!global_trace.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
-		return 0;
+		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
 
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
 					     TRACE_BUFFER_FLAGS);
@@ -3843,7 +3849,7 @@ __init static int tracer_alloc_buffers(void)
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
-		return 0;
+		goto out_free_cpumask;
 	}
 	max_tr.entries = ring_buffer_size(max_tr.buffer);
 	WARN_ON(max_tr.entries != global_trace.entries);
@@ -3873,8 +3879,14 @@ __init static int tracer_alloc_buffers(void)
 				       &trace_panic_notifier);
 
 	register_die_notifier(&trace_die_notifier);
+	ret = 0;
 
-	return 0;
+out_free_cpumask:
+	free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+	free_cpumask_var(tracing_buffer_mask);
+out:
+	return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index a5779bd975db..eaca5ad803ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -196,9 +196,9 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
 	return HRTIMER_RESTART;
 }
 
-static void start_stack_timer(int cpu)
+static void start_stack_timer(void *unused)
 {
-	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+	struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
 
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = stack_trace_timer_fn;
@@ -208,14 +208,7 @@ static void start_stack_timer(int cpu)
 
 static void start_stack_timers(void)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-		start_stack_timer(cpu);
-	}
-	set_cpus_allowed_ptr(current, &saved_mask);
+	on_each_cpu(start_stack_timer, NULL, 1);
 }
 
 static void stop_stack_timer(int cpu)
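
Note (not part of the commit): the trace_sysprof.c hunk drops the old trick of migrating the current task onto each CPU with set_cpus_allowed_ptr() and instead calls on_each_cpu(), which runs the callback on every online CPU and, with the final argument set to 1, waits for all of them to finish. A rough sketch of that idiom follows; my_state, init_per_cpu_state() and init_all_cpus() are made-up names, on_each_cpu(), per_cpu() and smp_processor_id() are the real interfaces.

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, my_state);

/* Called on each online CPU by on_each_cpu(). */
static void init_per_cpu_state(void *unused)
{
	per_cpu(my_state, smp_processor_id()) = 1;
}

static void init_all_cpus(void)
{
	/* Third argument 1: wait until every CPU has run the callback. */
	on_each_cpu(init_per_cpu_state, NULL, 1);
}
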