Diffstat (limited to 'kernel/trace')

 kernel/trace/ftrace.c              | 23
 kernel/trace/trace_branch.c        |  8
 kernel/trace/trace_event_profile.c | 15
 kernel/trace/trace_hw_branches.c   |  8
 kernel/trace/trace_output.c        | 18
 5 files changed, 37 insertions(+), 35 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3724756e41ca..37ba67e33265 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1078,14 +1078,9 @@ static void ftrace_replace_code(int enable)
                 failed = __ftrace_replace_code(rec, enable);
                 if (failed) {
                         rec->flags |= FTRACE_FL_FAILED;
-                        if ((system_state == SYSTEM_BOOTING) ||
-                            !core_kernel_text(rec->ip)) {
-                                ftrace_free_rec(rec);
-                        } else {
-                                ftrace_bug(failed, rec->ip);
-                                /* Stop processing */
-                                return;
-                        }
+                        ftrace_bug(failed, rec->ip);
+                        /* Stop processing */
+                        return;
                 }
         } while_for_each_ftrace_rec();
 }
@@ -2662,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod,
 }
 
 #ifdef CONFIG_MODULES
-void ftrace_release(void *start, void *end)
+void ftrace_release_mod(struct module *mod)
 {
         struct dyn_ftrace *rec;
         struct ftrace_page *pg;
-        unsigned long s = (unsigned long)start;
-        unsigned long e = (unsigned long)end;
 
-        if (ftrace_disabled || !start || start == end)
+        if (ftrace_disabled)
                 return;
 
         mutex_lock(&ftrace_lock);
         do_for_each_ftrace_rec(pg, rec) {
-                if ((rec->ip >= s) && (rec->ip < e)) {
+                if (within_module_core(rec->ip, mod)) {
                         /*
                          * rec->ip is changed in ftrace_free_rec()
                          * It should not between s and e if record was freed.
@@ -2706,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self,
                                    mod->num_ftrace_callsites);
                 break;
         case MODULE_STATE_GOING:
-                ftrace_release(mod->ftrace_callsites,
-                               mod->ftrace_callsites +
-                               mod->num_ftrace_callsites);
+                ftrace_release_mod(mod);
                 break;
         }
 
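The ftrace.c hunks replace the old address-range helper ftrace_release(start, end) with ftrace_release_mod(mod), which walks the dyn_ftrace record pages and frees every record whose ip lies inside the unloading module, and they make ftrace_replace_code() treat any conversion failure as fatal instead of quietly freeing the record during boot. For orientation, a rough sketch of ftrace_release_mod() as it reads after the patch; the warn-and-free tail of the loop and the closing mutex_unlock() are not visible in the hunk and are assumed here from the surrounding code:

void ftrace_release_mod(struct module *mod)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (ftrace_disabled)
                return;

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {
                /* Free only records that belong to the departing module. */
                if (within_module_core(rec->ip, mod)) {
                        /* An already-freed record must never match here. */
                        FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
                        ftrace_free_rec(rec);
                }
        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);
}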
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7a7a9fd249a9..4a194f08f88c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
         struct trace_array *tr = branch_tracer;
         struct ring_buffer_event *event;
         struct trace_branch *entry;
+        struct ring_buffer *buffer;
         unsigned long flags;
         int cpu, pc;
         const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
                 goto out;
 
         pc = preempt_count();
-        event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+        buffer = tr->buffer;
+        event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
                                           sizeof(*entry), flags, pc);
         if (!event)
                 goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
         entry->line = f->line;
         entry->correct = val == expect;
 
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, buffer, event))
+                ring_buffer_unlock_commit(buffer, event);
 
  out:
         atomic_dec(&tr->data[cpu]->disabled);
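The branch tracer change reads tr->buffer once into a local struct ring_buffer pointer and hands that pointer to the reserve, filter and commit calls, matching the ring-buffer based signatures these helpers now take. A minimal sketch of the resulting reserve/commit pattern; the entry-extraction line is assumed from the unchanged body of probe_likely_condition(), and the locking and field assignments are elided:

        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct trace_branch *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
                                          sizeof(*entry), flags, pc);
        if (!event)
                goto out;

        entry = ring_buffer_event_data(event);
        /* ... fill in entry->func, entry->file, entry->line, entry->correct ... */

        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);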
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index dd44b8768867..8d5c171cc998 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -31,7 +31,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
         if (atomic_inc_return(&event->profile_count))
                 return 0;
 
-        if (!total_profile_count++) {
+        if (!total_profile_count) {
                 buf = (char *)alloc_percpu(profile_buf_t);
                 if (!buf)
                         goto fail_buf;
@@ -46,14 +46,19 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
         }
 
         ret = event->profile_enable();
-        if (!ret)
+        if (!ret) {
+                total_profile_count++;
                 return 0;
+        }
 
-        kfree(trace_profile_buf_nmi);
 fail_buf_nmi:
-        kfree(trace_profile_buf);
+        if (!total_profile_count) {
+                free_percpu(trace_profile_buf_nmi);
+                free_percpu(trace_profile_buf);
+                trace_profile_buf_nmi = NULL;
+                trace_profile_buf = NULL;
+        }
 fail_buf:
-        total_profile_count--;
         atomic_dec(&event->profile_count);
 
         return ret;
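Two fixes are folded into the trace_event_profile.c hunks: total_profile_count is no longer bumped speculatively in the allocation check but only once event->profile_enable() has succeeded, and the failure path now releases the per-CPU buffers with free_percpu() rather than kfree() (they come from alloc_percpu()), clears the global pointers, and only does so when no other profiled event still references them. A rough reconstruction of the whole function after the patch; the local declarations and the rcu_assign_pointer() lines are not shown in the diff and are assumed from the surrounding code:

static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

        /* Only the first enable of this particular event does any work. */
        if (atomic_inc_return(&event->profile_count))
                return 0;

        /* Allocate the shared per-CPU buffers for the first profiled event. */
        if (!total_profile_count) {
                buf = (char *)alloc_percpu(profile_buf_t);
                if (!buf)
                        goto fail_buf;
                rcu_assign_pointer(trace_profile_buf, buf);

                buf = (char *)alloc_percpu(profile_buf_t);
                if (!buf)
                        goto fail_buf_nmi;
                rcu_assign_pointer(trace_profile_buf_nmi, buf);
        }

        ret = event->profile_enable();
        if (!ret) {
                /* Count the event only once it is really enabled. */
                total_profile_count++;
                return 0;
        }

fail_buf_nmi:
        /* Free the buffers only if no other event still uses them. */
        if (!total_profile_count) {
                free_percpu(trace_profile_buf_nmi);
                free_percpu(trace_profile_buf);
                trace_profile_buf_nmi = NULL;
                trace_profile_buf = NULL;
        }
fail_buf:
        atomic_dec(&event->profile_count);

        return ret;
}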
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 23b63859130e..69543a905cd5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to)
         struct ftrace_event_call *call = &event_hw_branch;
         struct trace_array *tr = hw_branch_trace;
         struct ring_buffer_event *event;
+        struct ring_buffer *buf;
         struct hw_branch_entry *entry;
         unsigned long irq1;
         int cpu;
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to)
         if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                 goto out;
 
-        event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+        buf = tr->buffer;
+        event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
                                           sizeof(*entry), 0, 0);
         if (!event)
                 goto out;
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to)
         entry->ent.type = TRACE_HW_BRANCHES;
         entry->from = from;
         entry->to = to;
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                trace_buffer_unlock_commit(tr, event, 0, 0);
+        if (!filter_check_discard(call, entry, buf, event))
+                trace_buffer_unlock_commit(buf, event, 0, 0);
 
  out:
         atomic_dec(&tr->data[cpu]->disabled);
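trace_hw_branches.c gets the same conversion as trace_branch.c: tr->buffer is read once into a local pointer and used for the reserve, filter and commit calls. The additional detail here is that trace_buffer_unlock_commit() now receives the struct ring_buffer * directly instead of the trace_array. A minimal sketch of the changed call sequence, with the entry setup elided:

        struct ring_buffer *buf = tr->buffer;

        event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        /* ... fill in entry->ent.type, entry->from and entry->to ... */
        if (!filter_check_discard(call, entry, buf, event))
                trace_buffer_unlock_commit(buf, event, 0, 0);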
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f572f44c6e1e..ed17565826b0 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -486,16 +486,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
                       hardirq ? 'h' : softirq ? 's' : '.'))
                 return 0;
 
-        if (entry->lock_depth < 0)
-                ret = trace_seq_putc(s, '.');
+        if (entry->preempt_count)
+                ret = trace_seq_printf(s, "%x", entry->preempt_count);
         else
-                ret = trace_seq_printf(s, "%d", entry->lock_depth);
+                ret = trace_seq_putc(s, '.');
+
         if (!ret)
                 return 0;
 
-        if (entry->preempt_count)
-                return trace_seq_printf(s, "%x", entry->preempt_count);
-        return trace_seq_putc(s, '.');
+        if (entry->lock_depth < 0)
+                return trace_seq_putc(s, '.');
+
+        return trace_seq_printf(s, "%d", entry->lock_depth);
 }
 
 static int
@@ -883,7 +885,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
         trace_assign_type(field, iter->ent);
 
         if (!S)
-                task_state_char(field->prev_state);
+                S = task_state_char(field->prev_state);
         T = task_state_char(field->next_state);
         if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
                               field->prev_pid,
@@ -918,7 +920,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
         trace_assign_type(field, iter->ent);
 
         if (!S)
-                task_state_char(field->prev_state);
+                S = task_state_char(field->prev_state);
         T = task_state_char(field->next_state);
 
         SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
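The trace_output.c changes do two things. In trace_print_lat_fmt() the latency-format columns are reordered so the preempt count is printed before the lock-depth value, each position falling back to '.' when there is nothing to show; and in trace_ctxwake_raw()/trace_ctxwake_hex() a real bug is fixed where the result of task_state_char(field->prev_state) was computed and discarded instead of being assigned to S. The tail of trace_print_lat_fmt() after the patch, annotated (all lines are visible in the hunk above):

        /* preempt count, printed in hex, or '.' when zero */
        if (entry->preempt_count)
                ret = trace_seq_printf(s, "%x", entry->preempt_count);
        else
                ret = trace_seq_putc(s, '.');

        if (!ret)
                return 0;

        /* lock depth, or '.' when negative (no depth recorded) */
        if (entry->lock_depth < 0)
                return trace_seq_putc(s, '.');

        return trace_seq_printf(s, "%d", entry->lock_depth);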