author     David S. Miller <davem@davemloft.net>  2009-11-19 01:19:03 -0500
committer  David S. Miller <davem@davemloft.net>  2009-11-19 01:19:03 -0500
commit     3505d1a9fd65e2d3e00827857b6795d9d8983658 (patch)
tree       941cfafdb57c427bb6b7ebf6354ee93b2a3693b5 /kernel/trace
parent     dfef948ed2ba69cf041840b5e860d6b4e16fa0b1 (diff)
parent     66b00a7c93ec782d118d2c03bd599cfd041e80a1 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
        drivers/net/sfc/sfe4001.c
        drivers/net/wireless/libertas/cmd.c
        drivers/staging/Kconfig
        drivers/staging/Makefile
        drivers/staging/rtl8187se/Kconfig
        drivers/staging/rtl8192e/Kconfig
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c              | 35
-rw-r--r--  kernel/trace/kmemtrace.c           |  2
-rw-r--r--  kernel/trace/ring_buffer.c         | 14
-rw-r--r--  kernel/trace/trace.c               | 10
-rw-r--r--  kernel/trace/trace_branch.c        |  8
-rw-r--r--  kernel/trace/trace_event_profile.c | 15
-rw-r--r--  kernel/trace/trace_events_filter.c |  3
-rw-r--r--  kernel/trace/trace_hw_branches.c   |  8
-rw-r--r--  kernel/trace/trace_output.c        | 23
-rw-r--r--  kernel/trace/trace_syscalls.c      |  4
10 files changed, 67 insertions, 55 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 46592feab5a6..6dc4e5ef7a01 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
         if (ftrace_trace_function == ftrace_stub)
                 return;
 
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
         func = ftrace_trace_function;
+#else
+        func = __ftrace_trace_function;
+#endif
 
         if (ftrace_pid_trace) {
                 set_ftrace_pid_function(func);
@@ -736,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
  out:
         mutex_unlock(&ftrace_profile_lock);
 
-        filp->f_pos += cnt;
+        *ppos += cnt;
 
         return cnt;
 }
@@ -1074,14 +1078,9 @@ static void ftrace_replace_code(int enable)
                 failed = __ftrace_replace_code(rec, enable);
                 if (failed) {
                         rec->flags |= FTRACE_FL_FAILED;
-                        if ((system_state == SYSTEM_BOOTING) ||
-                            !core_kernel_text(rec->ip)) {
-                                ftrace_free_rec(rec);
-                        } else {
-                                ftrace_bug(failed, rec->ip);
-                                /* Stop processing */
-                                return;
-                        }
+                        ftrace_bug(failed, rec->ip);
+                        /* Stop processing */
+                        return;
                 }
         } while_for_each_ftrace_rec();
 }
@@ -2223,15 +2222,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                 ret = ftrace_process_regex(parser->buffer,
                                            parser->idx, enable);
                 if (ret)
-                        goto out;
+                        goto out_unlock;
 
                 trace_parser_clear(parser);
         }
 
         ret = read;
-
+out_unlock:
         mutex_unlock(&ftrace_regex_lock);
-out:
+
         return ret;
 }
 
@@ -2658,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod,
 }
 
 #ifdef CONFIG_MODULES
-void ftrace_release(void *start, void *end)
+void ftrace_release_mod(struct module *mod)
 {
         struct dyn_ftrace *rec;
         struct ftrace_page *pg;
-        unsigned long s = (unsigned long)start;
-        unsigned long e = (unsigned long)end;
 
-        if (ftrace_disabled || !start || start == end)
+        if (ftrace_disabled)
                 return;
 
         mutex_lock(&ftrace_lock);
         do_for_each_ftrace_rec(pg, rec) {
-                if ((rec->ip >= s) && (rec->ip < e)) {
+                if (within_module_core(rec->ip, mod)) {
                         /*
                          * rec->ip is changed in ftrace_free_rec()
                          * It should not between s and e if record was freed.
@@ -2702,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self,
                                    mod->num_ftrace_callsites);
                 break;
         case MODULE_STATE_GOING:
-                ftrace_release(mod->ftrace_callsites,
-                               mod->ftrace_callsites +
-                               mod->num_ftrace_callsites);
+                ftrace_release_mod(mod);
                 break;
         }
 
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
                 return 1;
         }
 
-        if (!register_tracer(&kmem_tracer)) {
+        if (register_tracer(&kmem_tracer) != 0) {
                 pr_warning("Warning: could not register the kmem tracer\n");
                 return 1;
         }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..5dd017fea6f5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -483,7 +483,7 @@ struct ring_buffer_iter {
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
-static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
+static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 {
         /* shift to debug/test normalization and TIME_EXTENTS */
         return buffer->clock() << DEBUG_SHIFT;
@@ -494,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
         u64 time;
 
         preempt_disable_notrace();
-        time = rb_time_stamp(buffer, cpu);
+        time = rb_time_stamp(buffer);
         preempt_enable_no_resched_notrace();
 
         return time;
@@ -599,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list)
 }
 
 /*
- * rb_is_head_page - test if the give page is the head page
+ * rb_is_head_page - test if the given page is the head page
  *
  * Because the reader may move the head_page pointer, we can
  * not trust what the head page is (it may be pointing to
@@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
         atomic_inc(&cpu_buffer->record_disabled);
         synchronize_sched();
 
+        spin_lock_irq(&cpu_buffer->reader_lock);
         rb_head_page_deactivate(cpu_buffer);
 
         for (i = 0; i < nr_pages; i++) {
@@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
                 return;
 
         rb_reset_cpu(cpu_buffer);
+        spin_unlock_irq(&cpu_buffer->reader_lock);
 
         rb_check_pages(cpu_buffer);
 
@@ -1868,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
                  * Nested commits always have zero deltas, so
                  * just reread the time stamp
                  */
-                *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
+                *ts = rb_time_stamp(buffer);
                 next_page->page->time_stamp = *ts;
         }
 
@@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
                 goto out_fail;
 
-        ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
+        ts = rb_time_stamp(cpu_buffer->buffer);
 
         /*
          * Only the first commit can update the timestamp.
@@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
- * ring_buffer_overrun_cpu - get the number of overruns in buffer
+ * ring_buffer_overruns - get the number of overruns in buffer
  * @buffer: The ring buffer
  *
  * Returns the total number of overruns in the ring buffer
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45068269ebb1..b20d3ec75de9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
 
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
-        return trace_array_printk(&global_trace, ip, fmt, args);
+        return trace_array_vprintk(&global_trace, ip, fmt, args);
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
@@ -2440,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                         return ret;
         }
 
-        filp->f_pos += cnt;
+        *ppos += cnt;
 
         return cnt;
 }
@@ -2582,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
         }
         mutex_unlock(&trace_types_lock);
 
-        filp->f_pos += cnt;
+        *ppos += cnt;
 
         return cnt;
 }
@@ -2764,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
         if (err)
                 return err;
 
-        filp->f_pos += ret;
+        *ppos += ret;
 
         return ret;
 }
@@ -3299,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
                 }
         }
 
-        filp->f_pos += cnt;
+        *ppos += cnt;
 
         /* If check pages failed, return ENOMEM */
         if (tracing_disabled)
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7a7a9fd249a9..4a194f08f88c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
         struct trace_array *tr = branch_tracer;
         struct ring_buffer_event *event;
         struct trace_branch *entry;
+        struct ring_buffer *buffer;
         unsigned long flags;
         int cpu, pc;
         const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
                 goto out;
 
         pc = preempt_count();
-        event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+        buffer = tr->buffer;
+        event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
                                           sizeof(*entry), flags, pc);
         if (!event)
                 goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
         entry->line = f->line;
         entry->correct = val == expect;
 
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, buffer, event))
+                ring_buffer_unlock_commit(buffer, event);
 
  out:
         atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index dd44b8768867..8d5c171cc998 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -31,7 +31,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
         if (atomic_inc_return(&event->profile_count))
                 return 0;
 
-        if (!total_profile_count++) {
+        if (!total_profile_count) {
                 buf = (char *)alloc_percpu(profile_buf_t);
                 if (!buf)
                         goto fail_buf;
@@ -46,14 +46,19 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
         }
 
         ret = event->profile_enable();
-        if (!ret)
+        if (!ret) {
+                total_profile_count++;
                 return 0;
+        }
 
-        kfree(trace_profile_buf_nmi);
 fail_buf_nmi:
-        kfree(trace_profile_buf);
+        if (!total_profile_count) {
+                free_percpu(trace_profile_buf_nmi);
+                free_percpu(trace_profile_buf);
+                trace_profile_buf_nmi = NULL;
+                trace_profile_buf = NULL;
+        }
 fail_buf:
-        total_profile_count--;
         atomic_dec(&event->profile_count);
 
         return ret;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 23245785927f..98a6cc5c64ed 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -933,8 +933,9 @@ static void postfix_clear(struct filter_parse_state *ps)
 
         while (!list_empty(&ps->postfix)) {
                 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
-                kfree(elt->operand);
                 list_del(&elt->list);
+                kfree(elt->operand);
+                kfree(elt);
         }
 }
 
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 23b63859130e..69543a905cd5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to)
         struct ftrace_event_call *call = &event_hw_branch;
         struct trace_array *tr = hw_branch_trace;
         struct ring_buffer_event *event;
+        struct ring_buffer *buf;
         struct hw_branch_entry *entry;
         unsigned long irq1;
         int cpu;
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to)
         if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                 goto out;
 
-        event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+        buf = tr->buffer;
+        event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
                                           sizeof(*entry), 0, 0);
         if (!event)
                 goto out;
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to)
         entry->ent.type = TRACE_HW_BRANCHES;
         entry->from = from;
         entry->to = to;
-        if (!filter_check_discard(call, entry, tr->buffer, event))
-                trace_buffer_unlock_commit(tr, event, 0, 0);
+        if (!filter_check_discard(call, entry, buf, event))
+                trace_buffer_unlock_commit(buf, event, 0, 0);
 
  out:
         atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f572f44c6e1e..b6c12c6a1bcd 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
  * @s: trace sequence descriptor
  * @fmt: printf format string
  *
+ * It returns 0 if the trace oversizes the buffer's free
+ * space, 1 otherwise.
+ *
  * The tracer may use either sequence operations or its own
  * copy to user routines. To simplify formating of a trace
  * trace_seq_printf is used to store strings into a special
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 
         s->len += ret;
 
-        return len;
+        return 1;
 }
 EXPORT_SYMBOL_GPL(trace_seq_printf);
 
@@ -486,16 +489,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
                               hardirq ? 'h' : softirq ? 's' : '.'))
                 return 0;
 
-        if (entry->lock_depth < 0)
-                ret = trace_seq_putc(s, '.');
+        if (entry->preempt_count)
+                ret = trace_seq_printf(s, "%x", entry->preempt_count);
         else
-                ret = trace_seq_printf(s, "%d", entry->lock_depth);
+                ret = trace_seq_putc(s, '.');
+
         if (!ret)
                 return 0;
 
-        if (entry->preempt_count)
-                return trace_seq_printf(s, "%x", entry->preempt_count);
-        return trace_seq_putc(s, '.');
+        if (entry->lock_depth < 0)
+                return trace_seq_putc(s, '.');
+
+        return trace_seq_printf(s, "%d", entry->lock_depth);
 }
 
 static int
@@ -883,7 +888,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
         trace_assign_type(field, iter->ent);
 
         if (!S)
-                task_state_char(field->prev_state);
+                S = task_state_char(field->prev_state);
         T = task_state_char(field->next_state);
         if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
                               field->prev_pid,
@@ -918,7 +923,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
         trace_assign_type(field, iter->ent);
 
         if (!S)
-                task_state_char(field->prev_state);
+                S = task_state_char(field->prev_state);
         T = task_state_char(field->next_state);
 
         SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 9fbce6c9d2e1..527e17eae575 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -166,7 +166,7 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
                                SYSCALL_FIELD(int, nr),
-                               SYSCALL_FIELD(unsigned long, ret));
+                               SYSCALL_FIELD(long, ret));
         if (!ret)
                 return 0;
 
@@ -212,7 +212,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
         if (ret)
                 return ret;
 
-        ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0,
+        ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
                                  FILTER_OTHER);
 
         return ret;