 kernel/trace/trace.c | 38 ++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 290e9da7aa9a..5da391c5fb0d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -35,6 +35,12 @@
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly tracing_thresh;
 
+static unsigned long __read_mostly tracing_nr_buffers;
+static cpumask_t __read_mostly tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu)	\
+	for_each_cpu_mask(cpu, tracing_buffer_mask)
+
 /* dummy trace to disable tracing */
 static struct tracer no_tracer __read_mostly = {
 	.name		= "none",
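[Note: the new for_each_tracing_cpu() macro restricts every per-CPU loop below to CPUs whose bit is set in tracing_buffer_mask, instead of walking all possible CPUs. A minimal userspace model of that pattern, for illustration only: the kernel's cpumask_t and for_each_cpu_mask() are stood in for by a plain unsigned long bitmask.]

	#include <stdio.h>

	/* Stand-in for cpumask_t: one bit per CPU with a buffer. */
	static unsigned long tracing_buffer_mask;

	/* Visit only CPUs whose bit is set, as for_each_cpu_mask() does. */
	#define for_each_tracing_cpu(cpu)					\
		for ((cpu) = 0; (cpu) < (int)(8 * sizeof(unsigned long)); (cpu)++) \
			if (tracing_buffer_mask & (1UL << (cpu)))

	int main(void)
	{
		int cpu;

		tracing_buffer_mask = 0x5;	/* buffers exist on CPUs 0 and 2 */
		for_each_tracing_cpu(cpu)
			printf("visiting per-CPU buffer %d\n", cpu);
		return 0;
	}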
@@ -328,7 +334,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 	/* clear out all the previous traces */
-	for_each_possible_cpu(i) {
+	for_each_tracing_cpu(i) {
 		data = tr->data[i];
 		flip_trace(max_tr.data[i], data);
 		tracing_reset(data);
@@ -352,7 +358,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
-	for_each_possible_cpu(i)
+	for_each_tracing_cpu(i)
 		tracing_reset(max_tr.data[i]);
 
 	flip_trace(max_tr.data[cpu], data);
@@ -398,7 +404,7 @@ int register_tracer(struct tracer *type)
 	 * internal tracing to verify that everything is in order.
 	 * If we fail, we do not register this tracer.
 	 */
-	for_each_possible_cpu(i) {
+	for_each_tracing_cpu(i) {
 		data = tr->data[i];
 		if (!head_page(data))
 			continue;
@@ -417,7 +423,7 @@ int register_tracer(struct tracer *type)
 		goto out;
 	}
 	/* Only reset on passing, to avoid touching corrupted buffers */
-	for_each_possible_cpu(i) {
+	for_each_tracing_cpu(i) {
 		data = tr->data[i];
 		if (!head_page(data))
 			continue;
@@ -847,7 +853,7 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu)
 	int next_cpu = -1;
 	int cpu;
 
-	for_each_possible_cpu(cpu) {
+	for_each_tracing_cpu(cpu) {
 		if (!head_page(tr->data[cpu]))
 			continue;
 		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
@@ -972,7 +978,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	iter->prev_ent = NULL;
 	iter->prev_cpu = -1;
 
-	for_each_possible_cpu(i) {
+	for_each_tracing_cpu(i) {
 		iter->next_idx[i] = 0;
 		iter->next_page[i] = NULL;
 	}
@@ -1089,7 +1095,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	if (type)
 		name = type->name;
 
-	for_each_possible_cpu(cpu) {
+	for_each_tracing_cpu(cpu) {
 		if (head_page(tr->data[cpu])) {
 			total += tr->data[cpu]->trace_idx;
 			if (tr->data[cpu]->trace_idx > tr->entries)
@@ -1519,7 +1525,7 @@ static int trace_empty(struct trace_iterator *iter)
 	struct trace_array_cpu *data;
 	int cpu;
 
-	for_each_possible_cpu(cpu) {
+	for_each_tracing_cpu(cpu) {
 		data = iter->tr->data[cpu];
 
 		if (head_page(data) && data->trace_idx &&
@@ -1831,7 +1837,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 
 	raw_local_irq_disable();
 	__raw_spin_lock(&ftrace_max_lock);
-	for_each_possible_cpu(cpu) {
+	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
 		 * about to flip a bit in the cpumask:
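[Note: the comment in this hunk says the loop adjusts a per-CPU disabled counter when a mask bit is about to flip; the surrounding kernel logic is not shown in this diff, so the following is only a hypothetical userspace model of that accounting, with all names invented.]

	#include <stdio.h>

	#define NR_CPUS 4

	static unsigned long tracing_cpu_mask = 0xf;	/* all four CPUs on */
	static int disabled[NR_CPUS];			/* per-CPU counters */

	/* Adjust the counter only for CPUs whose mask bit actually flips. */
	static void apply_new_mask(unsigned long new_mask)
	{
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			unsigned long bit = 1UL << cpu;

			if ((tracing_cpu_mask & bit) && !(new_mask & bit))
				disabled[cpu]++;	/* switched off */
			if (!(tracing_cpu_mask & bit) && (new_mask & bit))
				disabled[cpu]--;	/* switched back on */
		}
		tracing_cpu_mask = new_mask;
	}

	int main(void)
	{
		int cpu;

		apply_new_mask(0x5);	/* drop CPUs 1 and 3 */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu %d disabled=%d\n", cpu, disabled[cpu]);
		return 0;
	}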
@@ -2308,7 +2314,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 	ftrace_enabled = 0;
 #endif
 	smp_wmb();
-	for_each_possible_cpu(cpu) {
+	for_each_tracing_cpu(cpu) {
 		data = iter->tr->data[cpu];
 
 		if (!head_page(data) || !data->trace_idx)
@@ -2605,7 +2611,7 @@ static int trace_alloc_page(void)
 	int i;
 
 	/* first allocate a page for each CPU */
-	for_each_possible_cpu(i) {
+	for_each_tracing_cpu(i) {
 		array = (void *)__get_free_page(GFP_KERNEL);
 		if (array == NULL) {
 			printk(KERN_ERR "tracer: failed to allocate page"
@@ -2630,7 +2636,7 @@ static int trace_alloc_page(void)
 	}
 
 	/* Now that we successfully allocate a page per CPU, add them */
-	for_each_possible_cpu(i) {
+	for_each_tracing_cpu(i) {
 		data = global_trace.data[i];
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
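[Note: the comments in these two trace_alloc_page() hunks show a two-phase scheme: allocate a page for every CPU first, then add them to the buffers only once all allocations have succeeded. A rough userspace sketch of that pattern follows; names are invented, and malloc stands in for the kernel's page lists.]

	#include <stdio.h>
	#include <stdlib.h>

	#define NR_BUFFERS 4

	/*
	 * Two-phase grow: stage one page per buffer first, commit only when
	 * every allocation succeeded, so a mid-loop failure never leaves
	 * the buffers unevenly sized.
	 */
	static int grow_buffers(void *pages[NR_BUFFERS])
	{
		void *staged[NR_BUFFERS];
		int i;

		for (i = 0; i < NR_BUFFERS; i++) {	/* phase 1: allocate */
			staged[i] = malloc(4096);
			if (!staged[i]) {
				while (i--)
					free(staged[i]);	/* roll back */
				return -1;
			}
		}
		for (i = 0; i < NR_BUFFERS; i++)	/* phase 2: commit */
			pages[i] = staged[i];
		return 0;
	}

	int main(void)
	{
		void *pages[NR_BUFFERS] = { 0 };

		printf("grow_buffers: %s\n",
		       grow_buffers(pages) ? "failed" : "ok");
		return 0;
	}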
@@ -2666,7 +2672,7 @@ static int trace_free_page(void)
 	int ret = 0;
 
 	/* free one page from each buffer */
-	for_each_possible_cpu(i) {
+	for_each_tracing_cpu(i) {
 		data = global_trace.data[i];
 		p = data->trace_pages.next;
 		if (p == &data->trace_pages) {
@@ -2717,8 +2723,12 @@ __init static int tracer_alloc_buffers(void)
 
 	global_trace.ctrl = tracer_enabled;
 
+	/* TODO: make the number of buffers hot pluggable with CPUS */
+	tracing_nr_buffers = num_possible_cpus();
+	tracing_buffer_mask = cpu_possible_map;
+
 	/* Allocate the first page for all buffers */
-	for_each_possible_cpu(i) {
+	for_each_tracing_cpu(i) {
 		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
 		max_tr.data[i] = &per_cpu(max_data, i);
 
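[Note: tracer_alloc_buffers() freezes the mask once at boot from cpu_possible_map, so, per the TODO above, CPUs hotplugged later are not yet reflected. A trivial userspace model of that one-time setup; the names are invented and __builtin_popcountl stands in for the kernel's bit counting.]

	#include <stdio.h>

	static unsigned long cpu_possible_map = 0xf;	/* CPUs 0-3 possible */
	static unsigned long tracing_buffer_mask;
	static unsigned long tracing_nr_buffers;

	int main(void)
	{
		/* Copy the possible-CPU mask once; every buffer allocated
		 * afterwards is also the set of CPUs iterated over. */
		tracing_buffer_mask = cpu_possible_map;
		tracing_nr_buffers = __builtin_popcountl(cpu_possible_map);
		printf("tracing %lu buffers, mask %#lx\n",
		       tracing_nr_buffers, tracing_buffer_mask);
		return 0;
	}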