author     Frederic Weisbecker <fweisbec@gmail.com>   2009-09-10 19:09:23 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>   2009-09-10 19:09:23 -0400
commit     8f8ffe2485bcaa890800681451d380779cea06af
tree       1d2ef3a27f1cab9a2b9014f4b75886a96a1ae8db   /kernel/trace
parent     70069577323e6f72b845166724f34b9858134437
parent     d28daf923ac5e4a0d7cecebae56f3e339189366b
Merge commit 'tracing/core' into tracing/kprobes
Conflicts:
kernel/trace/trace_export.c
kernel/trace/trace_kprobe.c
Merge reason: this topic branch lacks an important
build fix from tracing/core:
0dd7b74787eaf7858c6c573353a83c3e2766e674:
tracing: Fix double CPP substitution in TRACE_EVENT_FN
which prevents crashes when tracepoint headers are included multiple times.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'kernel/trace')
 kernel/trace/Kconfig                 |   9
 kernel/trace/blktrace.c              |  24
 kernel/trace/ftrace.c                |  17
 kernel/trace/kmemtrace.c             |   4
 kernel/trace/ring_buffer.c           | 172
 kernel/trace/trace.c                 | 387
 kernel/trace/trace.h                 |  28
 kernel/trace/trace_boot.c            |  16
 kernel/trace/trace_events.c          |   6
 kernel/trace/trace_events_filter.c   |  51
 kernel/trace/trace_export.c          |   4
 kernel/trace/trace_functions_graph.c |  14
 kernel/trace/trace_irqsoff.c         |   3
 kernel/trace/trace_kprobe.c          |  16
 kernel/trace/trace_mmiotrace.c       |  10
 kernel/trace/trace_power.c           |  22
 kernel/trace/trace_sched_switch.c    |  18
 kernel/trace/trace_sched_wakeup.c    |   7
 kernel/trace/trace_syscalls.c        |  46
 19 files changed, 543 insertions, 311 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index fb5fbf75f279..e78dcbde1a81 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -60,9 +60,14 @@ config EVENT_TRACING
 	bool
 
 config CONTEXT_SWITCH_TRACER
-	select MARKERS
 	bool
 
+config RING_BUFFER_ALLOW_SWAP
+	bool
+	help
+	 Allow the use of ring_buffer_swap_cpu.
+	 Adds a very slight overhead to tracing when enabled.
+
 # All tracer options should select GENERIC_TRACER. For those options that are
 # enabled by all tracers (context switch and event tracer) they select TRACING.
 # This allows those options to appear when no other tracer is selected. But the
@@ -147,6 +152,7 @@ config IRQSOFF_TRACER
 	select TRACE_IRQFLAGS
 	select GENERIC_TRACER
 	select TRACER_MAX_TRACE
+	select RING_BUFFER_ALLOW_SWAP
 	help
 	  This option measures the time spent in irqs-off critical
 	  sections, with microsecond accuracy.
@@ -168,6 +174,7 @@ config PREEMPT_TRACER
 	depends on PREEMPT
 	select GENERIC_TRACER
 	select TRACER_MAX_TRACE
+	select RING_BUFFER_ALLOW_SWAP
 	help
 	  This option measures the time spent in preemption off critical
 	  sections, with microsecond accuracy.
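RING_BUFFER_ALLOW_SWAP carries no prompt: it is only switched on through "select" by IRQSOFF_TRACER and PREEMPT_TRACER, and ring_buffer.c compiles the swap path out whenever nothing selected it. A minimal user-space sketch of that compile-time gating pattern (MY_CONFIG_ALLOW_SWAP and swap_bufs() are illustrative names, not kernel symbols):

#include <stdio.h>

/* Pretend this comes from the build system, like CONFIG_RING_BUFFER_ALLOW_SWAP. */
#define MY_CONFIG_ALLOW_SWAP 1

struct buf { int id; };

#if MY_CONFIG_ALLOW_SWAP
/* Real implementation: only built when a user of the feature selected it. */
static int swap_bufs(struct buf *a, struct buf *b)
{
	struct buf tmp = *a;
	*a = *b;
	*b = tmp;
	return 0;
}
#else
/* Stub: callers still compile, but the feature reports "not supported". */
static int swap_bufs(struct buf *a, struct buf *b)
{
	(void)a; (void)b;
	return -1;
}
#endif

int main(void)
{
	struct buf x = { 1 }, y = { 2 };

	printf("swap returned %d, x.id=%d y.id=%d\n", swap_bufs(&x, &y), x.id, y.id);
	return 0;
}

The stub keeps every caller compiling while the feature costs nothing when unused, which is the point of making the option selectable rather than user-visible.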
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 1090b0aed9ba..3eb159c277c8 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -65,13 +65,15 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 {
 	struct blk_io_trace *t;
 	struct ring_buffer_event *event = NULL;
+	struct ring_buffer *buffer = NULL;
 	int pc = 0;
 	int cpu = smp_processor_id();
 	bool blk_tracer = blk_tracer_enabled;
 
 	if (blk_tracer) {
+		buffer = blk_tr->buffer;
 		pc = preempt_count();
-		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + len,
 						  0, pc);
 		if (!event)
@@ -96,7 +98,7 @@ record_it:
 		memcpy((void *) t + sizeof(*t), data, len);
 
 		if (blk_tracer)
-			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+			trace_buffer_unlock_commit(buffer, event, 0, pc);
 	}
 }
 
@@ -179,6 +181,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 {
 	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
+	struct ring_buffer *buffer = NULL;
 	struct blk_io_trace *t;
 	unsigned long flags = 0;
 	unsigned long *sequence;
@@ -204,8 +207,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	if (blk_tracer) {
 		tracing_record_cmdline(current);
 
+		buffer = blk_tr->buffer;
 		pc = preempt_count();
-		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + pdu_len,
 						  0, pc);
 		if (!event)
@@ -252,7 +256,7 @@ record_it:
 		memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
 
 		if (blk_tracer) {
-			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+			trace_buffer_unlock_commit(buffer, event, 0, pc);
 			return;
 		}
 	}
@@ -267,8 +271,8 @@ static void blk_trace_free(struct blk_trace *bt)
 {
 	debugfs_remove(bt->msg_file);
 	debugfs_remove(bt->dropped_file);
-	debugfs_remove(bt->dir);
 	relay_close(bt->rchan);
+	debugfs_remove(bt->dir);
 	free_percpu(bt->sequence);
 	free_percpu(bt->msg_data);
 	kfree(bt);
@@ -378,18 +382,8 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 
 static int blk_remove_buf_file_callback(struct dentry *dentry)
 {
-	struct dentry *parent = dentry->d_parent;
 	debugfs_remove(dentry);
 
-	/*
-	 * this will fail for all but the last file, but that is ok. what we
-	 * care about is the top level buts->name directory going away, when
-	 * the last trace file is gone. Then we don't have to rmdir() that
-	 * manually on trace stop, so it nicely solves the issue with
-	 * force killing of running traces.
-	 */
-
-	debugfs_remove(parent);
 	return 0;
 }
 
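The blktrace hunks replace every direct use of blk_tr in the event path with a ring_buffer pointer snapshotted once per event (buffer = blk_tr->buffer), so that the reserve and the commit address the same buffer even if the tracer's buffer pointer changes in between. A toy illustration of why the snapshot matters (all names here are invented):

#include <stdio.h>

struct rb { const char *name; };
struct tracer { struct rb *buffer; };

static void reserve(struct rb *buf) { printf("reserve on %s\n", buf->name); }
static void commit(struct rb *buf)  { printf("commit on  %s\n", buf->name); }

int main(void)
{
	struct rb a = { "a" }, b = { "b" };
	struct tracer tr = { &a };

	struct rb *buffer = tr.buffer;	/* snapshot, like buffer = blk_tr->buffer */

	reserve(buffer);
	tr.buffer = &b;			/* a concurrent swap elsewhere */
	commit(buffer);			/* still "a": reserve and commit stay paired */
	return 0;
}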
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1993b7186cdb..8c804e24f96f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2222,7 +2222,11 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	read++;
 	cnt--;
 
-	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
+	/*
+	 * If the parser haven't finished with the last write,
+	 * continue reading the user input without skipping spaces.
+	 */
+	if (!(iter->flags & FTRACE_ITER_CONT)) {
 		/* skip white space */
 		while (cnt && isspace(ch)) {
 			ret = get_user(ch, ubuf++);
@@ -2232,8 +2236,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 			cnt--;
 		}
 
+		/* only spaces were written */
 		if (isspace(ch)) {
-			file->f_pos += read;
+			*ppos += read;
 			ret = read;
 			goto out;
 		}
@@ -2262,12 +2267,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 		if (ret)
 			goto out;
 		iter->buffer_idx = 0;
-	} else
+	} else {
 		iter->flags |= FTRACE_ITER_CONT;
+		iter->buffer[iter->buffer_idx++] = ch;
+	}
 
-
-	file->f_pos += read;
-
+	*ppos += read;
 	ret = read;
 out:
 	mutex_unlock(&ftrace_regex_lock);
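The ftrace_regex_write() changes do two things: they honour FTRACE_ITER_CONT so a token split across two write() calls is glued back together instead of being re-parsed from whitespace, and they advance the *ppos the VFS hands in rather than poking file->f_pos directly. A small user-space sketch of the same continuation logic, with made-up names:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Toy version of the FTRACE_ITER_CONT idea: a token parser fed in chunks.
 * While "cont" is set, a new chunk continues the previous token instead of
 * skipping leading whitespace. Purely illustrative. */
struct parser {
	char buf[64];
	size_t idx;
	int cont;
};

static void feed(struct parser *p, const char *chunk, size_t len, size_t *ppos)
{
	size_t i = 0;

	if (!p->cont)			/* fresh token: skip leading spaces */
		while (i < len && isspace((unsigned char)chunk[i]))
			i++;

	for (; i < len && !isspace((unsigned char)chunk[i]); i++)
		p->buf[p->idx++] = chunk[i];

	if (i < len) {			/* hit whitespace: token complete */
		p->buf[p->idx] = '\0';
		printf("token: '%s'\n", p->buf);
		p->idx = 0;
		p->cont = 0;
	} else {
		p->cont = 1;		/* token continues in the next write */
	}
	*ppos += len;			/* advance the caller's offset, like *ppos += read */
}

int main(void)
{
	struct parser p = { {0}, 0, 0 };
	size_t pos = 0;

	feed(&p, "  do_sys", 8, &pos);
	feed(&p, "_open ", 6, &pos);	/* completes "do_sys_open" */
	printf("pos = %zu\n", pos);
	return 0;
}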
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index dda53ccf749b..81b1645c8549 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -183,11 +183,9 @@ static void kmemtrace_stop_probes(void)
 
 static int kmem_trace_init(struct trace_array *tr)
 {
-	int cpu;
 	kmemtrace_array = tr;
 
-	for_each_cpu(cpu, cpu_possible_mask)
-		tracing_reset(tr, cpu);
+	tracing_reset_online_cpus(tr);
 
 	kmemtrace_start_probes();
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index da2c59d8f486..454e74e718cf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -218,17 +218,12 @@ enum {
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
-	return event->type_len == RINGBUF_TYPE_PADDING
-			&& event->time_delta == 0;
-}
-
-static inline int rb_discarded_event(struct ring_buffer_event *event)
-{
-	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
+	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 }
 
 static void rb_event_set_padding(struct ring_buffer_event *event)
 {
+	/* padding has a NULL time_delta */
 	event->type_len = RINGBUF_TYPE_PADDING;
 	event->time_delta = 0;
 }
@@ -472,14 +467,19 @@ struct ring_buffer_iter {
 };
 
 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
-#define RB_WARN_ON(buffer, cond)					\
+#define RB_WARN_ON(b, cond)						\
 	({								\
 		int _____ret = unlikely(cond);				\
 		if (_____ret) {						\
-			atomic_inc(&buffer->record_disabled);		\
-			WARN_ON(1);					\
-		}							\
-		_____ret;						\
+			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
+				struct ring_buffer_per_cpu *__b =	\
+					(void *)b;			\
+				atomic_inc(&__b->buffer->record_disabled); \
+			} else						\
+				atomic_inc(&b->record_disabled);	\
+			WARN_ON(1);					\
+		}							\
+		_____ret;						\
 	})
 
 /* Up this if you want to test the TIME_EXTENTS and normalization */
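The reworked RB_WARN_ON() picks its behaviour by type: __same_type() is the kernel's wrapper around the GCC/Clang builtin __builtin_types_compatible_p(), which is evaluated at compile time, so the macro can accept either a struct ring_buffer or a struct ring_buffer_per_cpu. A standalone sketch of the same dispatch (the struct names and MARK_DISABLED are illustrative):

#include <stdio.h>

/* The kernel's __same_type() is a wrapper around this GCC/Clang builtin. */
#define same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))

struct outer   { int disabled; };
struct per_cpu { struct outer *owner; };

/* Bump the right counter depending on which struct was passed in. */
#define MARK_DISABLED(p)						\
	do {								\
		if (same_type(*(p), struct per_cpu)) {			\
			struct per_cpu *__p = (void *)(p);		\
			__p->owner->disabled++;				\
		} else {						\
			((struct outer *)(void *)(p))->disabled++;	\
		}							\
	} while (0)

int main(void)
{
	struct outer o = { 0 };
	struct per_cpu c = { &o };

	MARK_DISABLED(&o);
	MARK_DISABLED(&c);
	printf("disabled = %d\n", o.disabled);	/* prints 2 */
	return 0;
}

Both branches are still compiled for either argument type, hence the cast through void * before the per-cpu dereference, in the kernel macro as in this sketch.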
@@ -1778,9 +1778,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		event->type_len = RINGBUF_TYPE_PADDING;
 		/* time delta must be non zero */
 		event->time_delta = 1;
-		/* Account for this as an entry */
-		local_inc(&tail_page->entries);
-		local_inc(&cpu_buffer->entries);
 
 		/* Set write to end of buffer */
 		length = (tail + length) - BUF_PAGE_SIZE;
@@ -2076,7 +2073,8 @@ static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 static struct ring_buffer_event *
-rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
+rb_reserve_next_event(struct ring_buffer *buffer,
+		      struct ring_buffer_per_cpu *cpu_buffer,
 		      unsigned long length)
 {
 	struct ring_buffer_event *event;
@@ -2086,6 +2084,21 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 	rb_start_commit(cpu_buffer);
 
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
+	/*
+	 * Due to the ability to swap a cpu buffer from a buffer
+	 * it is possible it was swapped before we committed.
+	 * (committing stops a swap). We check for it here and
+	 * if it happened, we have to fail the write.
+	 */
+	barrier();
+	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
+		local_dec(&cpu_buffer->committing);
+		local_dec(&cpu_buffer->commits);
+		return NULL;
+	}
+#endif
+
 	length = rb_calculate_event_length(length);
  again:
 	/*
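The #ifdef'd check above closes a race between a writer and ring_buffer_swap_cpu(): the writer first announces itself via the committing counter, then re-reads cpu_buffer->buffer; the swapper, conversely, refuses to swap while committing is non-zero (the -EBUSY path added later in this file). One of the two sides therefore always backs off. A toy, single-threaded model of that handshake using C11 atomics (the toy_* names are invented, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct toy_buffer { const char *name; };

struct toy_cpu_buffer {
	_Atomic(struct toy_buffer *) owner;
	atomic_int committing;
};

static int toy_reserve(struct toy_cpu_buffer *cpu, struct toy_buffer *expected)
{
	atomic_fetch_add(&cpu->committing, 1);
	/* Re-check after announcing: if a swap slipped in first, fail the write. */
	if (atomic_load(&cpu->owner) != expected) {
		atomic_fetch_sub(&cpu->committing, 1);
		return -1;
	}
	return 0;	/* caller writes the event, then drops "committing" */
}

static int toy_swap(struct toy_cpu_buffer *cpu, struct toy_buffer *to)
{
	if (atomic_load(&cpu->committing))
		return -1;	/* like the -EBUSY path in ring_buffer_swap_cpu() */
	atomic_store(&cpu->owner, to);
	return 0;
}

int main(void)
{
	struct toy_buffer a = { "a" }, b = { "b" };
	struct toy_cpu_buffer cpu = { &a, 0 };

	printf("reserve: %d\n", toy_reserve(&cpu, &a));	/* 0: still owned by "a" */
	printf("swap:    %d\n", toy_swap(&cpu, &b));	/* -1: commit in flight */
	atomic_fetch_sub(&cpu.committing, 1);		/* commit finished */
	printf("swap:    %d\n", toy_swap(&cpu, &b));	/* 0: swapped to "b" */
	printf("reserve: %d\n", toy_reserve(&cpu, &a));	/* -1: buffer was swapped */
	return 0;
}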
@@ -2246,7 +2259,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (length > BUF_MAX_DATA_SIZE)
 		goto out;
 
-	event = rb_reserve_next_event(cpu_buffer, length);
+	event = rb_reserve_next_event(buffer, cpu_buffer, length);
 	if (!event)
 		goto out;
 
@@ -2269,18 +2282,23 @@
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
-static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+static void
+rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
 {
-	local_inc(&cpu_buffer->entries);
-
 	/*
 	 * The event first in the commit queue updates the
 	 * time stamp.
 	 */
 	if (rb_event_is_commit(cpu_buffer, event))
 		cpu_buffer->write_stamp += event->time_delta;
+}
 
+static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+		      struct ring_buffer_event *event)
+{
+	local_inc(&cpu_buffer->entries);
+	rb_update_write_stamp(cpu_buffer, event);
 	rb_end_commit(cpu_buffer);
 }
 
@@ -2327,32 +2345,57 @@ static inline void rb_event_discard(struct ring_buffer_event *event)
 	event->time_delta = 1;
 }
 
-/**
- * ring_buffer_event_discard - discard any event in the ring buffer
- * @event: the event to discard
- *
- * Sometimes a event that is in the ring buffer needs to be ignored.
- * This function lets the user discard an event in the ring buffer
- * and then that event will not be read later.
- *
- * Note, it is up to the user to be careful with this, and protect
- * against races. If the user discards an event that has been consumed
- * it is possible that it could corrupt the ring buffer.
+/*
+ * Decrement the entries to the page that an event is on.
+ * The event does not even need to exist, only the pointer
+ * to the page it is on. This may only be called before the commit
+ * takes place.
  */
-void ring_buffer_event_discard(struct ring_buffer_event *event)
+static inline void
+rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+		   struct ring_buffer_event *event)
 {
-	rb_event_discard(event);
+	unsigned long addr = (unsigned long)event;
+	struct buffer_page *bpage = cpu_buffer->commit_page;
+	struct buffer_page *start;
+
+	addr &= PAGE_MASK;
+
+	/* Do the likely case first */
+	if (likely(bpage->page == (void *)addr)) {
+		local_dec(&bpage->entries);
+		return;
+	}
+
+	/*
+	 * Because the commit page may be on the reader page we
+	 * start with the next page and check the end loop there.
+	 */
+	rb_inc_page(cpu_buffer, &bpage);
+	start = bpage;
+	do {
+		if (bpage->page == (void *)addr) {
+			local_dec(&bpage->entries);
+			return;
+		}
+		rb_inc_page(cpu_buffer, &bpage);
+	} while (bpage != start);
+
+	/* commit not part of this buffer?? */
+	RB_WARN_ON(cpu_buffer, 1);
 }
-EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
 
 /**
  * ring_buffer_commit_discard - discard an event that has not been committed
  * @buffer: the ring buffer
  * @event: non committed event to discard
  *
- * This is similar to ring_buffer_event_discard but must only be
- * performed on an event that has not been committed yet. The difference
- * is that this will also try to free the event from the ring buffer
+ * Sometimes an event that is in the ring buffer needs to be ignored.
+ * This function lets the user discard an event in the ring buffer
+ * and then that event will not be read later.
+ *
+ * This function only works if it is called before the the item has been
+ * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
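rb_decrement_entry() only has the event's address, so it locates the owning page by masking the address and walking the circular page list: the commit page is tried first as the likely case, then the ring is traversed until the walk returns to its starting point. A small stand-alone model of that lookup (the structures here are invented, not the kernel's):

#include <stdio.h>

struct toy_page {
	int lo, hi;		/* address range covered by this page */
	int entries;
	struct toy_page *next;
};

static int dec_entry(struct toy_page *commit_page, int addr)
{
	struct toy_page *p = commit_page, *start;

	if (addr >= p->lo && addr < p->hi) {	/* likely case first */
		p->entries--;
		return 0;
	}

	p = p->next;				/* then walk the ring */
	start = p;
	do {
		if (addr >= p->lo && addr < p->hi) {
			p->entries--;
			return 0;
		}
		p = p->next;
	} while (p != start);

	return -1;				/* address not in this ring */
}

int main(void)
{
	struct toy_page a = { 0, 10, 1, NULL }, b = { 10, 20, 1, NULL }, c = { 20, 30, 1, NULL };

	a.next = &b; b.next = &c; c.next = &a;
	printf("%d %d %d\n", dec_entry(&b, 12), dec_entry(&b, 25), dec_entry(&b, 99));
	printf("entries: a=%d b=%d c=%d\n", a.entries, b.entries, c.entries);
	return 0;
}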
@@ -2380,14 +2423,15 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	 */
 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
+	rb_decrement_entry(cpu_buffer, event);
 	if (rb_try_to_discard(cpu_buffer, event))
 		goto out;
 
 	/*
 	 * The commit is still visible by the reader, so we
-	 * must increment entries.
+	 * must still update the timestamp.
 	 */
-	local_inc(&cpu_buffer->entries);
+	rb_update_write_stamp(cpu_buffer, event);
 out:
 	rb_end_commit(cpu_buffer);
 
@@ -2448,7 +2492,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (length > BUF_MAX_DATA_SIZE)
 		goto out;
 
-	event = rb_reserve_next_event(cpu_buffer, length);
+	event = rb_reserve_next_event(buffer, cpu_buffer, length);
 	if (!event)
 		goto out;
 
@@ -2899,8 +2943,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 	event = rb_reader_event(cpu_buffer);
 
-	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
-			|| rb_discarded_event(event))
+	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 		cpu_buffer->read++;
 
 	rb_update_read_stamp(cpu_buffer, event);
@@ -3132,10 +3175,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
-	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
-		cpu_relax();
+	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
-	}
 
 	return event;
 }
@@ -3160,10 +3201,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	event = rb_iter_peek(iter, ts);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
-	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
-		cpu_relax();
+	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
-	}
 
 	return event;
 }
@@ -3209,10 +3248,8 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 out:
 	preempt_enable();
 
-	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
-		cpu_relax();
+	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
-	}
 
 	return event;
 }
@@ -3292,21 +3329,19 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;
 
- again:
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ again:
 	event = rb_iter_peek(iter, ts);
 	if (!event)
 		goto out;
 
+	if (event->type_len == RINGBUF_TYPE_PADDING)
+		goto again;
+
 	rb_advance_iter(iter);
 out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
-	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
-		cpu_relax();
-		goto again;
-	}
-
 	return event;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read);
@@ -3373,12 +3408,16 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
+	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
+		goto out;
+
 	__raw_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
 	__raw_spin_unlock(&cpu_buffer->lock);
 
+ out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
@@ -3461,6 +3500,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
@@ -3515,20 +3555,28 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	atomic_inc(&cpu_buffer_a->record_disabled);
 	atomic_inc(&cpu_buffer_b->record_disabled);
 
+	ret = -EBUSY;
+	if (local_read(&cpu_buffer_a->committing))
+		goto out_dec;
+	if (local_read(&cpu_buffer_b->committing))
+		goto out_dec;
+
 	buffer_a->buffers[cpu] = cpu_buffer_b;
 	buffer_b->buffers[cpu] = cpu_buffer_a;
 
 	cpu_buffer_b->buffer = buffer_a;
 	cpu_buffer_a->buffer = buffer_b;
 
+	ret = 0;
+
+out_dec:
 	atomic_dec(&cpu_buffer_a->record_disabled);
 	atomic_dec(&cpu_buffer_b->record_disabled);
-
-	ret = 0;
 out:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
+#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
 
 /**
  * ring_buffer_alloc_read_page - allocate a page to read from buffer
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 63dbc7ff213f..5c75deeefe30 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,9 +43,6 @@
 
 #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
 
-unsigned long __read_mostly tracing_max_latency;
-unsigned long __read_mostly tracing_thresh;
-
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
@@ -172,10 +169,11 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
-int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+int filter_current_check_discard(struct ring_buffer *buffer,
+				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, global_trace.buffer, event);
+	return filter_check_discard(call, rec, buffer, event);
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
@@ -266,6 +264,9 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
 	TRACE_ITER_GRAPH_TIME;
 
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
 /**
 * trace_wake_up - wake up tasks waiting for trace input
 *
@@ -338,45 +339,6 @@ static struct {
 
 int trace_clock_id;
 
-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a raw_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-
-/*
- * Copy the new maximum trace into the separate maximum-trace
- * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
- */
-static void
-__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
-{
-	struct trace_array_cpu *data = tr->data[cpu];
-
-	max_tr.cpu = cpu;
-	max_tr.time_start = data->preempt_timestamp;
-
-	data = max_tr.data[cpu];
-	data->saved_latency = tracing_max_latency;
-
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
-	data->pid = tsk->pid;
-	data->uid = task_uid(tsk);
-	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
-	data->policy = tsk->policy;
-	data->rt_priority = tsk->rt_priority;
-
-	/* record this tasks comm */
-	tracing_record_cmdline(tsk);
-}
-
 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
 	int len;
@@ -420,6 +382,56 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }
 
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ *
+ * It is also used in other places outside the update_max_tr
+ * so it needs to be defined outside of the
+ * CONFIG_TRACER_MAX_TRACE.
+ */
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+unsigned long __read_mostly tracing_max_latency;
+unsigned long __read_mostly tracing_thresh;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ */
+static void
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *max_data = tr->data[cpu];
+
+	max_tr.cpu = cpu;
+	max_tr.time_start = data->preempt_timestamp;
+
+	max_data = max_tr.data[cpu];
+	max_data->saved_latency = tracing_max_latency;
+	max_data->critical_start = data->critical_start;
+	max_data->critical_end = data->critical_end;
+
+	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	max_data->pid = tsk->pid;
+	max_data->uid = task_uid(tsk);
+	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+	max_data->policy = tsk->policy;
+	max_data->rt_priority = tsk->rt_priority;
+
+	/* record this tasks comm */
+	tracing_record_cmdline(tsk);
+}
+
 /**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
@@ -434,16 +446,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct ring_buffer *buf = tr->buffer;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
-	ftrace_disable_cpu();
-	ring_buffer_reset(tr->buffer);
-	ftrace_enable_cpu();
-
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
@@ -461,21 +472,35 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	int ret;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
-	ring_buffer_reset(max_tr.buffer);
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
+	if (ret == -EBUSY) {
+		/*
+		 * We failed to swap the buffer due to a commit taking
+		 * place on this CPU. We fail to record, but we reset
+		 * the max trace buffer (no one writes directly to it)
+		 * and flag that it failed.
+		 */
+		trace_array_printk(&max_tr, _THIS_IP_,
+			"Failed to swap buffers due to commit in progress\n");
+	}
+
 	ftrace_enable_cpu();
 
-	WARN_ON_ONCE(ret && ret != -EAGAIN);
+	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 /**
 * register_tracer - register a tracer with the ftrace system.
@@ -532,7 +557,6 @@ __acquires(kernel_lock)
 	if (type->selftest && !tracing_selftest_disabled) {
 		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
-		int i;
 
 		/*
 		 * Run a selftest on this tracer.
@@ -541,8 +565,7 @@ __acquires(kernel_lock)
 		 * internal tracing to verify that everything is in order.
 		 * If we fail, we do not register this tracer.
 		 */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		current_trace = type;
 		/* the test is responsible for initializing and enabling */
@@ -555,8 +578,7 @@ __acquires(kernel_lock)
 			goto out;
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -631,21 +653,42 @@ void unregister_tracer(struct tracer *type)
 	mutex_unlock(&trace_types_lock);
 }
 
-void tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct trace_array *tr, int cpu)
 {
 	ftrace_disable_cpu();
 	ring_buffer_reset_cpu(tr->buffer, cpu);
 	ftrace_enable_cpu();
 }
 
+void tracing_reset(struct trace_array *tr, int cpu)
+{
+	struct ring_buffer *buffer = tr->buffer;
+
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+	__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
+}
+
 void tracing_reset_online_cpus(struct trace_array *tr)
 {
+	struct ring_buffer *buffer = tr->buffer;
 	int cpu;
 
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
+		__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
 }
 
 void tracing_reset_current(int cpu)
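The new tracing_reset()/tracing_reset_online_cpus() follow a disable, quiesce, reset, re-enable sequence: recording is switched off, synchronize_sched() waits out writers that were already inside a commit (commits run with preemption disabled, so a scheduler grace period is enough), and only then are the per-cpu buffers reset. A user-space sketch of the same ordering, with an explicit in-flight counter standing in for the grace period (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct buf {
	atomic_int record_disabled;
	atomic_int writers;		/* in-flight commits */
	int len;
};

static int buf_write(struct buf *b)
{
	if (atomic_load(&b->record_disabled))
		return -1;
	atomic_fetch_add(&b->writers, 1);
	b->len++;			/* the "commit" */
	atomic_fetch_sub(&b->writers, 1);
	return 0;
}

static void buf_reset(struct buf *b)
{
	atomic_fetch_add(&b->record_disabled, 1);
	while (atomic_load(&b->writers))
		;			/* stand-in for synchronize_sched() */
	b->len = 0;
	atomic_fetch_sub(&b->record_disabled, 1);
}

int main(void)
{
	struct buf b = { 0, 0, 0 };

	buf_write(&b);
	buf_write(&b);
	printf("before reset: len=%d\n", b.len);
	buf_reset(&b);
	printf("after reset:  len=%d, write=%d\n", b.len, buf_write(&b));
	return 0;
}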
@@ -676,9 +719,6 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
-static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
-
 /**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
@@ -859,14 +899,15 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
-struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
-						    int type,
-						    unsigned long len,
-						    unsigned long flags, int pc)
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+			  int type,
+			  unsigned long len,
+			  unsigned long flags, int pc)
 {
 	struct ring_buffer_event *event;
 
-	event = ring_buffer_lock_reserve(tr->buffer, len);
+	event = ring_buffer_lock_reserve(buffer, len);
 	if (event != NULL) {
 		struct trace_entry *ent = ring_buffer_event_data(event);
 
@@ -877,53 +918,59 @@ struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 	return event;
 }
 
-static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
-						struct ring_buffer_event *event,
-						unsigned long flags, int pc,
-						int wake)
+static inline void
+__trace_buffer_unlock_commit(struct ring_buffer *buffer,
+			     struct ring_buffer_event *event,
+			     unsigned long flags, int pc,
+			     int wake)
 {
-	ring_buffer_unlock_commit(tr->buffer, event);
+	ring_buffer_unlock_commit(buffer, event);
 
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	ftrace_trace_stack(buffer, flags, 6, pc);
+	ftrace_trace_userstack(buffer, flags, pc);
 
 	if (wake)
 		trace_wake_up();
 }
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
+				  int type, unsigned long len,
 				  unsigned long flags, int pc)
 {
-	return trace_buffer_lock_reserve(&global_trace,
+	*current_rb = global_trace.buffer;
+	return trace_buffer_lock_reserve(*current_rb,
 					 type, len, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+				       struct ring_buffer_event *event,
+				       unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
 }
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+					 struct ring_buffer_event *event)
 {
-	ring_buffer_discard_commit(global_trace.buffer, event);
+	ring_buffer_discard_commit(buffer, event);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
@@ -933,6 +980,7 @@ trace_function(struct trace_array *tr,
 	       int pc)
 {
 	struct ftrace_event_call *call = &event_function;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
@@ -940,7 +988,7 @@ trace_function(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 					  flags, pc);
 	if (!event)
 		return;
@@ -948,8 +996,8 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -962,7 +1010,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct trace_array *tr,
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc)
 {
@@ -971,7 +1019,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct stack_entry *entry;
 	struct stack_trace trace;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -984,26 +1032,27 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
-void ftrace_trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-			int pc)
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
+			int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(buffer, flags, skip, pc);
 }
 
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc)
 {
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
-void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
+void
+ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
 	struct ftrace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
@@ -1013,7 +1062,7 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -1027,8 +1076,8 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 #ifdef UNUSED
@@ -1047,9 +1096,10 @@ ftrace_trace_special(void *__tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
+	struct ring_buffer *buffer = tr->buffer;
 	struct special_entry *entry;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
 					  sizeof(*entry), 0, pc);
 	if (!event)
 		return;
@@ -1057,7 +1107,7 @@ ftrace_trace_special(void *__tr,
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+	trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void
@@ -1103,6 +1153,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
@@ -1135,7 +1186,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		goto out_unlock;
 
 	size = sizeof(*entry) + sizeof(u32) * len;
-	event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+					  flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1143,8 +1196,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1159,14 +1212,30 @@ out:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_array_printk(struct trace_array *tr,
+		       unsigned long ip, const char *fmt, ...)
+{
+	int ret;
+	va_list ap;
+
+	if (!(trace_flags & TRACE_ITER_PRINTK))
+		return 0;
+
+	va_start(ap, fmt);
+	ret = trace_array_vprintk(tr, ip, fmt, ap);
+	va_end(ap);
+	return ret;
+}
+
+int trace_array_vprintk(struct trace_array *tr,
+			unsigned long ip, const char *fmt, va_list args)
 {
 	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
-	struct trace_array *tr = &global_trace;
+	struct ring_buffer *buffer;
 	struct trace_array_cpu *data;
 	int cpu, len = 0, size, pc;
 	struct print_entry *entry;
@@ -1194,7 +1263,9 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	trace_buf[len] = 0;
 
 	size = sizeof(*entry) + len + 1;
-	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1202,8 +1273,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1215,6 +1286,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	return len;
 }
+
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+{
+	return trace_array_printk(&global_trace, ip, fmt, args);
+}
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
 enum trace_file_type {
@@ -1354,6 +1430,37 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1354 | return ent; | 1430 | return ent; |
1355 | } | 1431 | } |
1356 | 1432 | ||
1433 | static void tracing_iter_reset(struct trace_iterator *iter, int cpu) | ||
1434 | { | ||
1435 | struct trace_array *tr = iter->tr; | ||
1436 | struct ring_buffer_event *event; | ||
1437 | struct ring_buffer_iter *buf_iter; | ||
1438 | unsigned long entries = 0; | ||
1439 | u64 ts; | ||
1440 | |||
1441 | tr->data[cpu]->skipped_entries = 0; | ||
1442 | |||
1443 | if (!iter->buffer_iter[cpu]) | ||
1444 | return; | ||
1445 | |||
1446 | buf_iter = iter->buffer_iter[cpu]; | ||
1447 | ring_buffer_iter_reset(buf_iter); | ||
1448 | |||
1449 | /* | ||
1450 | * We could have the case with the max latency tracers | ||
1451 | * that a reset never took place on a cpu. This is evident | ||
1452 | * by the timestamp being before the start of the buffer. | ||
1453 | */ | ||
1454 | while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { | ||
1455 | if (ts >= iter->tr->time_start) | ||
1456 | break; | ||
1457 | entries++; | ||
1458 | ring_buffer_read(buf_iter, NULL); | ||
1459 | } | ||
1460 | |||
1461 | tr->data[cpu]->skipped_entries = entries; | ||
1462 | } | ||
1463 | |||
1357 | /* | 1464 | /* |
1358 | * No necessary locking here. The worst thing which can | 1465 | * No necessary locking here. The worst thing which can |
1359 | * happen is loosing events consumed at the same time | 1466 | * happen is loosing events consumed at the same time |
@@ -1392,10 +1499,9 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1392 | 1499 | ||
1393 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | 1500 | if (cpu_file == TRACE_PIPE_ALL_CPU) { |
1394 | for_each_tracing_cpu(cpu) | 1501 | for_each_tracing_cpu(cpu) |
1395 | ring_buffer_iter_reset(iter->buffer_iter[cpu]); | 1502 | tracing_iter_reset(iter, cpu); |
1396 | } else | 1503 | } else |
1397 | ring_buffer_iter_reset(iter->buffer_iter[cpu_file]); | 1504 | tracing_iter_reset(iter, cpu_file); |
1398 | |||
1399 | 1505 | ||
1400 | ftrace_enable_cpu(); | 1506 | ftrace_enable_cpu(); |
1401 | 1507 | ||
@@ -1444,16 +1550,32 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
1444 | struct trace_array *tr = iter->tr; | 1550 | struct trace_array *tr = iter->tr; |
1445 | struct trace_array_cpu *data = tr->data[tr->cpu]; | 1551 | struct trace_array_cpu *data = tr->data[tr->cpu]; |
1446 | struct tracer *type = current_trace; | 1552 | struct tracer *type = current_trace; |
1447 | unsigned long total; | 1553 | unsigned long entries = 0; |
1448 | unsigned long entries; | 1554 | unsigned long total = 0; |
1555 | unsigned long count; | ||
1449 | const char *name = "preemption"; | 1556 | const char *name = "preemption"; |
1557 | int cpu; | ||
1450 | 1558 | ||
1451 | if (type) | 1559 | if (type) |
1452 | name = type->name; | 1560 | name = type->name; |
1453 | 1561 | ||
1454 | entries = ring_buffer_entries(iter->tr->buffer); | 1562 | |
1455 | total = entries + | 1563 | for_each_tracing_cpu(cpu) { |
1456 | ring_buffer_overruns(iter->tr->buffer); | 1564 | count = ring_buffer_entries_cpu(tr->buffer, cpu); |
1565 | /* | ||
1566 | * If this buffer has skipped entries, then we hold all | ||
1567 | * entries for the trace and we need to ignore the | ||
1568 | * ones before the time stamp. | ||
1569 | */ | ||
1570 | if (tr->data[cpu]->skipped_entries) { | ||
1571 | count -= tr->data[cpu]->skipped_entries; | ||
1572 | /* total is the same as the entries */ | ||
1573 | total += count; | ||
1574 | } else | ||
1575 | total += count + | ||
1576 | ring_buffer_overrun_cpu(tr->buffer, cpu); | ||
1577 | entries += count; | ||
1578 | } | ||
1457 | 1579 | ||
1458 | seq_printf(m, "# %s latency trace v1.1.5 on %s\n", | 1580 | seq_printf(m, "# %s latency trace v1.1.5 on %s\n", |
1459 | name, UTS_RELEASE); | 1581 | name, UTS_RELEASE); |
@@ -1495,7 +1617,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
1495 | seq_puts(m, "\n# => ended at: "); | 1617 | seq_puts(m, "\n# => ended at: "); |
1496 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); | 1618 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); |
1497 | trace_print_seq(m, &iter->seq); | 1619 | trace_print_seq(m, &iter->seq); |
1498 | seq_puts(m, "#\n"); | 1620 | seq_puts(m, "\n#\n"); |
1499 | } | 1621 | } |
1500 | 1622 | ||
1501 | seq_puts(m, "#\n"); | 1623 | seq_puts(m, "#\n"); |
@@ -1514,6 +1636,9 @@ static void test_cpu_buff_start(struct trace_iterator *iter) | |||
1514 | if (cpumask_test_cpu(iter->cpu, iter->started)) | 1636 | if (cpumask_test_cpu(iter->cpu, iter->started)) |
1515 | return; | 1637 | return; |
1516 | 1638 | ||
1639 | if (iter->tr->data[iter->cpu]->skipped_entries) | ||
1640 | return; | ||
1641 | |||
1517 | cpumask_set_cpu(iter->cpu, iter->started); | 1642 | cpumask_set_cpu(iter->cpu, iter->started); |
1518 | 1643 | ||
1519 | /* Don't print started cpu buffer for the first entry of the trace */ | 1644 | /* Don't print started cpu buffer for the first entry of the trace */ |
@@ -1776,19 +1901,23 @@ __tracing_open(struct inode *inode, struct file *file) | |||
1776 | if (ring_buffer_overruns(iter->tr->buffer)) | 1901 | if (ring_buffer_overruns(iter->tr->buffer)) |
1777 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | 1902 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
1778 | 1903 | ||
1904 | /* stop the trace while dumping */ | ||
1905 | tracing_stop(); | ||
1906 | |||
1779 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | 1907 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { |
1780 | for_each_tracing_cpu(cpu) { | 1908 | for_each_tracing_cpu(cpu) { |
1781 | 1909 | ||
1782 | iter->buffer_iter[cpu] = | 1910 | iter->buffer_iter[cpu] = |
1783 | ring_buffer_read_start(iter->tr->buffer, cpu); | 1911 | ring_buffer_read_start(iter->tr->buffer, cpu); |
1912 | tracing_iter_reset(iter, cpu); | ||
1784 | } | 1913 | } |
1785 | } else { | 1914 | } else { |
1786 | cpu = iter->cpu_file; | 1915 | cpu = iter->cpu_file; |
1787 | iter->buffer_iter[cpu] = | 1916 | iter->buffer_iter[cpu] = |
1788 | ring_buffer_read_start(iter->tr->buffer, cpu); | 1917 | ring_buffer_read_start(iter->tr->buffer, cpu); |
1918 | tracing_iter_reset(iter, cpu); | ||
1789 | } | 1919 | } |
1790 | 1920 | ||
1791 | /* TODO stop tracer */ | ||
1792 | ret = seq_open(file, &tracer_seq_ops); | 1921 | ret = seq_open(file, &tracer_seq_ops); |
1793 | if (ret < 0) { | 1922 | if (ret < 0) { |
1794 | fail_ret = ERR_PTR(ret); | 1923 | fail_ret = ERR_PTR(ret); |
@@ -1798,9 +1927,6 @@ __tracing_open(struct inode *inode, struct file *file) | |||
1798 | m = file->private_data; | 1927 | m = file->private_data; |
1799 | m->private = iter; | 1928 | m->private = iter; |
1800 | 1929 | ||
1801 | /* stop the trace while dumping */ | ||
1802 | tracing_stop(); | ||
1803 | |||
1804 | mutex_unlock(&trace_types_lock); | 1930 | mutex_unlock(&trace_types_lock); |
1805 | 1931 | ||
1806 | return iter; | 1932 | return iter; |
@@ -1811,6 +1937,7 @@ __tracing_open(struct inode *inode, struct file *file) | |||
1811 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | 1937 | ring_buffer_read_finish(iter->buffer_iter[cpu]); |
1812 | } | 1938 | } |
1813 | free_cpumask_var(iter->started); | 1939 | free_cpumask_var(iter->started); |
1940 | tracing_start(); | ||
1814 | fail: | 1941 | fail: |
1815 | mutex_unlock(&trace_types_lock); | 1942 | mutex_unlock(&trace_types_lock); |
1816 | kfree(iter->trace); | 1943 | kfree(iter->trace); |
@@ -3774,17 +3901,9 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
3774 | if (ret < 0) | 3901 | if (ret < 0) |
3775 | return ret; | 3902 | return ret; |
3776 | 3903 | ||
3777 | switch (val) { | 3904 | if (val != 0 && val != 1) |
3778 | case 0: | ||
3779 | trace_flags &= ~(1 << index); | ||
3780 | break; | ||
3781 | case 1: | ||
3782 | trace_flags |= 1 << index; | ||
3783 | break; | ||
3784 | |||
3785 | default: | ||
3786 | return -EINVAL; | 3905 | return -EINVAL; |
3787 | } | 3906 | set_tracer_flags(1 << index, val); |
3788 | 3907 | ||
3789 | *ppos += cnt; | 3908 | *ppos += cnt; |
3790 | 3909 | ||
@@ -3952,11 +4071,13 @@ static __init int tracer_init_debugfs(void) | |||
3952 | trace_create_file("current_tracer", 0644, d_tracer, | 4071 | trace_create_file("current_tracer", 0644, d_tracer, |
3953 | &global_trace, &set_tracer_fops); | 4072 | &global_trace, &set_tracer_fops); |
3954 | 4073 | ||
4074 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
3955 | trace_create_file("tracing_max_latency", 0644, d_tracer, | 4075 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
3956 | &tracing_max_latency, &tracing_max_lat_fops); | 4076 | &tracing_max_latency, &tracing_max_lat_fops); |
3957 | 4077 | ||
3958 | trace_create_file("tracing_thresh", 0644, d_tracer, | 4078 | trace_create_file("tracing_thresh", 0644, d_tracer, |
3959 | &tracing_thresh, &tracing_max_lat_fops); | 4079 | &tracing_thresh, &tracing_max_lat_fops); |
4080 | #endif | ||
3960 | 4081 | ||
3961 | trace_create_file("README", 0444, d_tracer, | 4082 | trace_create_file("README", 0444, d_tracer, |
3962 | NULL, &tracing_readme_fops); | 4083 | NULL, &tracing_readme_fops); |
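
The trace.c hunks above converge on one calling pattern: reserve space directly on a struct ring_buffer, fill the entry, then commit against that same buffer. A minimal sketch of the pattern, using only the signatures visible in this diff and assuming the usual kernel/trace context ("trace.h" in scope); the helper name and the 32-byte payload are illustrative, not part of the commit:

/*
 * Illustrative sketch only -- mirrors the trace_vprintk() flow above:
 * reserve on tr->buffer, fill a print_entry, commit on the same buffer.
 */
static void example_record_note(struct trace_array *tr, unsigned long ip,
				unsigned long flags, int pc)
{
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct print_entry *entry;
	int size = sizeof(*entry) + 32;		/* room for a short string */

	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, flags, pc);
	if (!event)
		return;				/* buffer disabled or full */

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	snprintf(entry->buf, 32, "example note");

	/* commit against the buffer the event was reserved on */
	trace_buffer_unlock_commit(buffer, event, flags, pc);
}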
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f5362a0529eb..821064914c80 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -258,9 +258,6 @@ struct trace_array_cpu { | |||
258 | atomic_t disabled; | 258 | atomic_t disabled; |
259 | void *buffer_page; /* ring buffer spare */ | 259 | void *buffer_page; /* ring buffer spare */ |
260 | 260 | ||
261 | /* these fields get copied into max-trace: */ | ||
262 | unsigned long trace_idx; | ||
263 | unsigned long overrun; | ||
264 | unsigned long saved_latency; | 261 | unsigned long saved_latency; |
265 | unsigned long critical_start; | 262 | unsigned long critical_start; |
266 | unsigned long critical_end; | 263 | unsigned long critical_end; |
@@ -268,6 +265,7 @@ struct trace_array_cpu { | |||
268 | unsigned long nice; | 265 | unsigned long nice; |
269 | unsigned long policy; | 266 | unsigned long policy; |
270 | unsigned long rt_priority; | 267 | unsigned long rt_priority; |
268 | unsigned long skipped_entries; | ||
271 | cycle_t preempt_timestamp; | 269 | cycle_t preempt_timestamp; |
272 | pid_t pid; | 270 | pid_t pid; |
273 | uid_t uid; | 271 | uid_t uid; |
@@ -441,12 +439,13 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | |||
441 | 439 | ||
442 | struct ring_buffer_event; | 440 | struct ring_buffer_event; |
443 | 441 | ||
444 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, | 442 | struct ring_buffer_event * |
445 | int type, | 443 | trace_buffer_lock_reserve(struct ring_buffer *buffer, |
446 | unsigned long len, | 444 | int type, |
447 | unsigned long flags, | 445 | unsigned long len, |
448 | int pc); | 446 | unsigned long flags, |
449 | void trace_buffer_unlock_commit(struct trace_array *tr, | 447 | int pc); |
448 | void trace_buffer_unlock_commit(struct ring_buffer *buffer, | ||
450 | struct ring_buffer_event *event, | 449 | struct ring_buffer_event *event, |
451 | unsigned long flags, int pc); | 450 | unsigned long flags, int pc); |
452 | 451 | ||
@@ -497,18 +496,20 @@ void unregister_tracer(struct tracer *type); | |||
497 | 496 | ||
498 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); | 497 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); |
499 | 498 | ||
499 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
500 | extern unsigned long tracing_max_latency; | 500 | extern unsigned long tracing_max_latency; |
501 | extern unsigned long tracing_thresh; | 501 | extern unsigned long tracing_thresh; |
502 | 502 | ||
503 | void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); | 503 | void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); |
504 | void update_max_tr_single(struct trace_array *tr, | 504 | void update_max_tr_single(struct trace_array *tr, |
505 | struct task_struct *tsk, int cpu); | 505 | struct task_struct *tsk, int cpu); |
506 | #endif /* CONFIG_TRACER_MAX_TRACE */ | ||
506 | 507 | ||
507 | #ifdef CONFIG_STACKTRACE | 508 | #ifdef CONFIG_STACKTRACE |
508 | void ftrace_trace_stack(struct trace_array *tr, unsigned long flags, | 509 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, |
509 | int skip, int pc); | 510 | int skip, int pc); |
510 | 511 | ||
511 | void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, | 512 | void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, |
512 | int pc); | 513 | int pc); |
513 | 514 | ||
514 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | 515 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
@@ -589,6 +590,11 @@ extern int | |||
589 | trace_vbprintk(unsigned long ip, const char *fmt, va_list args); | 590 | trace_vbprintk(unsigned long ip, const char *fmt, va_list args); |
590 | extern int | 591 | extern int |
591 | trace_vprintk(unsigned long ip, const char *fmt, va_list args); | 592 | trace_vprintk(unsigned long ip, const char *fmt, va_list args); |
593 | extern int | ||
594 | trace_array_vprintk(struct trace_array *tr, | ||
595 | unsigned long ip, const char *fmt, va_list args); | ||
596 | int trace_array_printk(struct trace_array *tr, | ||
597 | unsigned long ip, const char *fmt, ...); | ||
592 | 598 | ||
593 | extern unsigned long trace_flags; | 599 | extern unsigned long trace_flags; |
594 | 600 | ||
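
The new prototypes above give tracers a printk that targets a specific trace_array rather than the global one. A hypothetical caller, assuming the declarations above are in scope; "example_tr" and the message are placeholders, not part of this commit:

/*
 * Hypothetical caller of the new per-array printk; example_tr stands in
 * for whatever trace_array a tracer owns.
 */
static struct trace_array *example_tr;

static void example_log_threshold(unsigned long ip, unsigned long usecs)
{
	if (!example_tr)
		return;

	/* formatted write into example_tr->buffer; honours TRACE_ITER_PRINTK */
	trace_array_printk(example_tr, ip, "threshold crossed: %lu usecs\n",
			   usecs);
}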
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index a29ef23ffb47..19bfc75d467e 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
@@ -41,14 +41,12 @@ void disable_boot_trace(void) | |||
41 | 41 | ||
42 | static int boot_trace_init(struct trace_array *tr) | 42 | static int boot_trace_init(struct trace_array *tr) |
43 | { | 43 | { |
44 | int cpu; | ||
45 | boot_trace = tr; | 44 | boot_trace = tr; |
46 | 45 | ||
47 | if (!tr) | 46 | if (!tr) |
48 | return 0; | 47 | return 0; |
49 | 48 | ||
50 | for_each_cpu(cpu, cpu_possible_mask) | 49 | tracing_reset_online_cpus(tr); |
51 | tracing_reset(tr, cpu); | ||
52 | 50 | ||
53 | tracing_sched_switch_assign_trace(tr); | 51 | tracing_sched_switch_assign_trace(tr); |
54 | return 0; | 52 | return 0; |
@@ -132,6 +130,7 @@ struct tracer boot_tracer __read_mostly = | |||
132 | void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | 130 | void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) |
133 | { | 131 | { |
134 | struct ring_buffer_event *event; | 132 | struct ring_buffer_event *event; |
133 | struct ring_buffer *buffer; | ||
135 | struct trace_boot_call *entry; | 134 | struct trace_boot_call *entry; |
136 | struct trace_array *tr = boot_trace; | 135 | struct trace_array *tr = boot_trace; |
137 | 136 | ||
@@ -144,13 +143,14 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | |||
144 | sprint_symbol(bt->func, (unsigned long)fn); | 143 | sprint_symbol(bt->func, (unsigned long)fn); |
145 | preempt_disable(); | 144 | preempt_disable(); |
146 | 145 | ||
147 | event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL, | 146 | buffer = tr->buffer; |
147 | event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL, | ||
148 | sizeof(*entry), 0, 0); | 148 | sizeof(*entry), 0, 0); |
149 | if (!event) | 149 | if (!event) |
150 | goto out; | 150 | goto out; |
151 | entry = ring_buffer_event_data(event); | 151 | entry = ring_buffer_event_data(event); |
152 | entry->boot_call = *bt; | 152 | entry->boot_call = *bt; |
153 | trace_buffer_unlock_commit(tr, event, 0, 0); | 153 | trace_buffer_unlock_commit(buffer, event, 0, 0); |
154 | out: | 154 | out: |
155 | preempt_enable(); | 155 | preempt_enable(); |
156 | } | 156 | } |
@@ -158,6 +158,7 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | |||
158 | void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | 158 | void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) |
159 | { | 159 | { |
160 | struct ring_buffer_event *event; | 160 | struct ring_buffer_event *event; |
161 | struct ring_buffer *buffer; | ||
161 | struct trace_boot_ret *entry; | 162 | struct trace_boot_ret *entry; |
162 | struct trace_array *tr = boot_trace; | 163 | struct trace_array *tr = boot_trace; |
163 | 164 | ||
@@ -167,13 +168,14 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | |||
167 | sprint_symbol(bt->func, (unsigned long)fn); | 168 | sprint_symbol(bt->func, (unsigned long)fn); |
168 | preempt_disable(); | 169 | preempt_disable(); |
169 | 170 | ||
170 | event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET, | 171 | buffer = tr->buffer; |
172 | event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET, | ||
171 | sizeof(*entry), 0, 0); | 173 | sizeof(*entry), 0, 0); |
172 | if (!event) | 174 | if (!event) |
173 | goto out; | 175 | goto out; |
174 | entry = ring_buffer_event_data(event); | 176 | entry = ring_buffer_event_data(event); |
175 | entry->boot_ret = *bt; | 177 | entry->boot_ret = *bt; |
176 | trace_buffer_unlock_commit(tr, event, 0, 0); | 178 | trace_buffer_unlock_commit(buffer, event, 0, 0); |
177 | out: | 179 | out: |
178 | preempt_enable(); | 180 | preempt_enable(); |
179 | } | 181 | } |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 197cdaa96c43..ba3492076ab2 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -1485,6 +1485,7 @@ static void | |||
1485 | function_test_events_call(unsigned long ip, unsigned long parent_ip) | 1485 | function_test_events_call(unsigned long ip, unsigned long parent_ip) |
1486 | { | 1486 | { |
1487 | struct ring_buffer_event *event; | 1487 | struct ring_buffer_event *event; |
1488 | struct ring_buffer *buffer; | ||
1488 | struct ftrace_entry *entry; | 1489 | struct ftrace_entry *entry; |
1489 | unsigned long flags; | 1490 | unsigned long flags; |
1490 | long disabled; | 1491 | long disabled; |
@@ -1502,7 +1503,8 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1502 | 1503 | ||
1503 | local_save_flags(flags); | 1504 | local_save_flags(flags); |
1504 | 1505 | ||
1505 | event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry), | 1506 | event = trace_current_buffer_lock_reserve(&buffer, |
1507 | TRACE_FN, sizeof(*entry), | ||
1506 | flags, pc); | 1508 | flags, pc); |
1507 | if (!event) | 1509 | if (!event) |
1508 | goto out; | 1510 | goto out; |
@@ -1510,7 +1512,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1510 | entry->ip = ip; | 1512 | entry->ip = ip; |
1511 | entry->parent_ip = parent_ip; | 1513 | entry->parent_ip = parent_ip; |
1512 | 1514 | ||
1513 | trace_nowake_buffer_unlock_commit(event, flags, pc); | 1515 | trace_nowake_buffer_unlock_commit(buffer, event, flags, pc); |
1514 | 1516 | ||
1515 | out: | 1517 | out: |
1516 | atomic_dec(&per_cpu(test_event_disable, cpu)); | 1518 | atomic_dec(&per_cpu(test_event_disable, cpu)); |
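
trace_current_buffer_lock_reserve() now hands back, through an output parameter, the ring buffer it reserved on, and the commit helpers take that buffer explicitly. A sketch of the calling convention, shaped like the self-test above; the helper name is illustrative and the entry layout is the TRACE_FN entry used in this file:

/*
 * Sketch of the &buffer out-parameter convention introduced above.
 */
static void example_emit_fn_entry(unsigned long ip, unsigned long parent_ip,
				  unsigned long flags, int pc)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_current_buffer_lock_reserve(&buffer, TRACE_FN,
						  sizeof(*entry), flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	/* commit without waking up readers, on the buffer reserved above */
	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
}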
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 9f03082c81d8..93660fbbf629 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -309,7 +309,7 @@ void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) | |||
309 | struct event_filter *filter = call->filter; | 309 | struct event_filter *filter = call->filter; |
310 | 310 | ||
311 | mutex_lock(&event_mutex); | 311 | mutex_lock(&event_mutex); |
312 | if (filter->filter_string) | 312 | if (filter && filter->filter_string) |
313 | trace_seq_printf(s, "%s\n", filter->filter_string); | 313 | trace_seq_printf(s, "%s\n", filter->filter_string); |
314 | else | 314 | else |
315 | trace_seq_printf(s, "none\n"); | 315 | trace_seq_printf(s, "none\n"); |
@@ -322,7 +322,7 @@ void print_subsystem_event_filter(struct event_subsystem *system, | |||
322 | struct event_filter *filter = system->filter; | 322 | struct event_filter *filter = system->filter; |
323 | 323 | ||
324 | mutex_lock(&event_mutex); | 324 | mutex_lock(&event_mutex); |
325 | if (filter->filter_string) | 325 | if (filter && filter->filter_string) |
326 | trace_seq_printf(s, "%s\n", filter->filter_string); | 326 | trace_seq_printf(s, "%s\n", filter->filter_string); |
327 | else | 327 | else |
328 | trace_seq_printf(s, "none\n"); | 328 | trace_seq_printf(s, "none\n"); |
@@ -390,6 +390,9 @@ void destroy_preds(struct ftrace_event_call *call) | |||
390 | struct event_filter *filter = call->filter; | 390 | struct event_filter *filter = call->filter; |
391 | int i; | 391 | int i; |
392 | 392 | ||
393 | if (!filter) | ||
394 | return; | ||
395 | |||
393 | for (i = 0; i < MAX_FILTER_PRED; i++) { | 396 | for (i = 0; i < MAX_FILTER_PRED; i++) { |
394 | if (filter->preds[i]) | 397 | if (filter->preds[i]) |
395 | filter_free_pred(filter->preds[i]); | 398 | filter_free_pred(filter->preds[i]); |
@@ -400,17 +403,19 @@ void destroy_preds(struct ftrace_event_call *call) | |||
400 | call->filter = NULL; | 403 | call->filter = NULL; |
401 | } | 404 | } |
402 | 405 | ||
403 | int init_preds(struct ftrace_event_call *call) | 406 | static int init_preds(struct ftrace_event_call *call) |
404 | { | 407 | { |
405 | struct event_filter *filter; | 408 | struct event_filter *filter; |
406 | struct filter_pred *pred; | 409 | struct filter_pred *pred; |
407 | int i; | 410 | int i; |
408 | 411 | ||
412 | if (call->filter) | ||
413 | return 0; | ||
414 | |||
409 | filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL); | 415 | filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL); |
410 | if (!call->filter) | 416 | if (!call->filter) |
411 | return -ENOMEM; | 417 | return -ENOMEM; |
412 | 418 | ||
413 | call->filter_active = 0; | ||
414 | filter->n_preds = 0; | 419 | filter->n_preds = 0; |
415 | 420 | ||
416 | filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), GFP_KERNEL); | 421 | filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), GFP_KERNEL); |
@@ -432,7 +437,26 @@ oom: | |||
432 | 437 | ||
433 | return -ENOMEM; | 438 | return -ENOMEM; |
434 | } | 439 | } |
435 | EXPORT_SYMBOL_GPL(init_preds); | 440 | |
441 | static int init_subsystem_preds(struct event_subsystem *system) | ||
442 | { | ||
443 | struct ftrace_event_call *call; | ||
444 | int err; | ||
445 | |||
446 | list_for_each_entry(call, &ftrace_events, list) { | ||
447 | if (!call->define_fields) | ||
448 | continue; | ||
449 | |||
450 | if (strcmp(call->system, system->name) != 0) | ||
451 | continue; | ||
452 | |||
453 | err = init_preds(call); | ||
454 | if (err) | ||
455 | return err; | ||
456 | } | ||
457 | |||
458 | return 0; | ||
459 | } | ||
436 | 460 | ||
437 | enum { | 461 | enum { |
438 | FILTER_DISABLE_ALL, | 462 | FILTER_DISABLE_ALL, |
@@ -449,6 +473,9 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, | |||
449 | if (!call->define_fields) | 473 | if (!call->define_fields) |
450 | continue; | 474 | continue; |
451 | 475 | ||
476 | if (strcmp(call->system, system->name) != 0) | ||
477 | continue; | ||
478 | |||
452 | if (flag == FILTER_INIT_NO_RESET) { | 479 | if (flag == FILTER_INIT_NO_RESET) { |
453 | call->filter->no_reset = false; | 480 | call->filter->no_reset = false; |
454 | continue; | 481 | continue; |
@@ -457,10 +484,8 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, | |||
457 | if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset) | 484 | if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset) |
458 | continue; | 485 | continue; |
459 | 486 | ||
460 | if (!strcmp(call->system, system->name)) { | 487 | filter_disable_preds(call); |
461 | filter_disable_preds(call); | 488 | remove_filter_string(call->filter); |
462 | remove_filter_string(call->filter); | ||
463 | } | ||
464 | } | 489 | } |
465 | } | 490 | } |
466 | 491 | ||
@@ -1094,6 +1119,10 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | |||
1094 | 1119 | ||
1095 | mutex_lock(&event_mutex); | 1120 | mutex_lock(&event_mutex); |
1096 | 1121 | ||
1122 | err = init_preds(call); | ||
1123 | if (err) | ||
1124 | goto out_unlock; | ||
1125 | |||
1097 | if (!strcmp(strstrip(filter_string), "0")) { | 1126 | if (!strcmp(strstrip(filter_string), "0")) { |
1098 | filter_disable_preds(call); | 1127 | filter_disable_preds(call); |
1099 | remove_filter_string(call->filter); | 1128 | remove_filter_string(call->filter); |
@@ -1139,6 +1168,10 @@ int apply_subsystem_event_filter(struct event_subsystem *system, | |||
1139 | 1168 | ||
1140 | mutex_lock(&event_mutex); | 1169 | mutex_lock(&event_mutex); |
1141 | 1170 | ||
1171 | err = init_subsystem_preds(system); | ||
1172 | if (err) | ||
1173 | goto out_unlock; | ||
1174 | |||
1142 | if (!strcmp(strstrip(filter_string), "0")) { | 1175 | if (!strcmp(strstrip(filter_string), "0")) { |
1143 | filter_free_subsystem_preds(system, FILTER_DISABLE_ALL); | 1176 | filter_free_subsystem_preds(system, FILTER_DISABLE_ALL); |
1144 | remove_filter_string(system->filter); | 1177 | remove_filter_string(system->filter); |
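
With init_preds() now static and called lazily, filters are allocated on first use from apply_event_filter()/apply_subsystem_event_filter(). A simplified sketch of that flow for the "clear filter" case, assuming it lives in trace_events_filter.c so the static helpers are visible; the function name is illustrative and real filter-string parsing is elided:

/*
 * Simplified sketch of the lazy-allocation flow introduced above:
 * the filter is created on first use, then cleared.
 */
static int example_clear_filter(struct ftrace_event_call *call)
{
	int err;

	mutex_lock(&event_mutex);

	/* allocates call->filter if it does not exist yet; no-op otherwise */
	err = init_preds(call);
	if (err)
		goto out_unlock;

	filter_disable_preds(call);
	remove_filter_string(call->filter);

out_unlock:
	mutex_unlock(&event_mutex);
	return err;
}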
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index f75faeccf68e..a79ef6f193c0 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
@@ -120,7 +120,7 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ | |||
120 | static int ftrace_raw_init_event(struct ftrace_event_call *event_call) | 120 | static int ftrace_raw_init_event(struct ftrace_event_call *event_call) |
121 | { | 121 | { |
122 | INIT_LIST_HEAD(&event_call->fields); | 122 | INIT_LIST_HEAD(&event_call->fields); |
123 | init_preds(event_call); | 123 | |
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | 126 | ||
@@ -137,7 +137,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
137 | .raw_init = ftrace_raw_init_event, \ | 137 | .raw_init = ftrace_raw_init_event, \ |
138 | .show_format = ftrace_format_##call, \ | 138 | .show_format = ftrace_format_##call, \ |
139 | .define_fields = ftrace_define_fields_##call, \ | 139 | .define_fields = ftrace_define_fields_##call, \ |
140 | }; | 140 | }; \ |
141 | 141 | ||
142 | #undef TRACE_EVENT_FORMAT_NOFILTER | 142 | #undef TRACE_EVENT_FORMAT_NOFILTER |
143 | #define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \ | 143 | #define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \ |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 3f4a251b7d16..b3749a2c3132 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -173,19 +173,20 @@ static int __trace_graph_entry(struct trace_array *tr, | |||
173 | { | 173 | { |
174 | struct ftrace_event_call *call = &event_funcgraph_entry; | 174 | struct ftrace_event_call *call = &event_funcgraph_entry; |
175 | struct ring_buffer_event *event; | 175 | struct ring_buffer_event *event; |
176 | struct ring_buffer *buffer = tr->buffer; | ||
176 | struct ftrace_graph_ent_entry *entry; | 177 | struct ftrace_graph_ent_entry *entry; |
177 | 178 | ||
178 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 179 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
179 | return 0; | 180 | return 0; |
180 | 181 | ||
181 | event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT, | 182 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, |
182 | sizeof(*entry), flags, pc); | 183 | sizeof(*entry), flags, pc); |
183 | if (!event) | 184 | if (!event) |
184 | return 0; | 185 | return 0; |
185 | entry = ring_buffer_event_data(event); | 186 | entry = ring_buffer_event_data(event); |
186 | entry->graph_ent = *trace; | 187 | entry->graph_ent = *trace; |
187 | if (!filter_current_check_discard(call, entry, event)) | 188 | if (!filter_current_check_discard(buffer, call, entry, event)) |
188 | ring_buffer_unlock_commit(tr->buffer, event); | 189 | ring_buffer_unlock_commit(buffer, event); |
189 | 190 | ||
190 | return 1; | 191 | return 1; |
191 | } | 192 | } |
@@ -236,19 +237,20 @@ static void __trace_graph_return(struct trace_array *tr, | |||
236 | { | 237 | { |
237 | struct ftrace_event_call *call = &event_funcgraph_exit; | 238 | struct ftrace_event_call *call = &event_funcgraph_exit; |
238 | struct ring_buffer_event *event; | 239 | struct ring_buffer_event *event; |
240 | struct ring_buffer *buffer = tr->buffer; | ||
239 | struct ftrace_graph_ret_entry *entry; | 241 | struct ftrace_graph_ret_entry *entry; |
240 | 242 | ||
241 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 243 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
242 | return; | 244 | return; |
243 | 245 | ||
244 | event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET, | 246 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, |
245 | sizeof(*entry), flags, pc); | 247 | sizeof(*entry), flags, pc); |
246 | if (!event) | 248 | if (!event) |
247 | return; | 249 | return; |
248 | entry = ring_buffer_event_data(event); | 250 | entry = ring_buffer_event_data(event); |
249 | entry->ret = *trace; | 251 | entry->ret = *trace; |
250 | if (!filter_current_check_discard(call, entry, event)) | 252 | if (!filter_current_check_discard(buffer, call, entry, event)) |
251 | ring_buffer_unlock_commit(tr->buffer, event); | 253 | ring_buffer_unlock_commit(buffer, event); |
252 | } | 254 | } |
253 | 255 | ||
254 | void trace_graph_return(struct ftrace_graph_ret *trace) | 256 | void trace_graph_return(struct ftrace_graph_ret *trace) |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index b923d13e2fad..5555b75a0d12 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -178,7 +178,6 @@ out_unlock: | |||
178 | out: | 178 | out: |
179 | data->critical_sequence = max_sequence; | 179 | data->critical_sequence = max_sequence; |
180 | data->preempt_timestamp = ftrace_now(cpu); | 180 | data->preempt_timestamp = ftrace_now(cpu); |
181 | tracing_reset(tr, cpu); | ||
182 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); | 181 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
183 | } | 182 | } |
184 | 183 | ||
@@ -208,7 +207,6 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
208 | data->critical_sequence = max_sequence; | 207 | data->critical_sequence = max_sequence; |
209 | data->preempt_timestamp = ftrace_now(cpu); | 208 | data->preempt_timestamp = ftrace_now(cpu); |
210 | data->critical_start = parent_ip ? : ip; | 209 | data->critical_start = parent_ip ? : ip; |
211 | tracing_reset(tr, cpu); | ||
212 | 210 | ||
213 | local_save_flags(flags); | 211 | local_save_flags(flags); |
214 | 212 | ||
@@ -379,6 +377,7 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
379 | irqsoff_trace = tr; | 377 | irqsoff_trace = tr; |
380 | /* make sure that the tracer is visible */ | 378 | /* make sure that the tracer is visible */ |
381 | smp_wmb(); | 379 | smp_wmb(); |
380 | tracing_reset_online_cpus(tr); | ||
382 | start_irqsoff_tracer(tr); | 381 | start_irqsoff_tracer(tr); |
383 | } | 382 | } |
384 | 383 | ||
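
Several tracers in this commit (boot, power, irqsoff, wakeup) drop their open-coded per-cpu tracing_reset() loops in favour of a single tracing_reset_online_cpus() call at init or reset time. The shared shape, sketched with placeholder names that are not part of the commit:

/*
 * Placeholder tracer init showing the shared shape after the conversion.
 */
static struct trace_array *example_trace;

static int example_tracer_init(struct trace_array *tr)
{
	example_trace = tr;
	/* make the tracer's trace_array pointer visible before tracing */
	smp_wmb();

	/* one call replaces the old for_each_cpu()/tracing_reset() loop */
	tracing_reset_online_cpus(tr);
	return 0;
}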
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f4ec3fc87b2d..19a6de63b44b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -819,6 +819,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
819 | struct trace_probe *tp = container_of(kp, struct trace_probe, kp); | 819 | struct trace_probe *tp = container_of(kp, struct trace_probe, kp); |
820 | struct kprobe_trace_entry *entry; | 820 | struct kprobe_trace_entry *entry; |
821 | struct ring_buffer_event *event; | 821 | struct ring_buffer_event *event; |
822 | struct ring_buffer *buffer; | ||
822 | int size, i, pc; | 823 | int size, i, pc; |
823 | unsigned long irq_flags; | 824 | unsigned long irq_flags; |
824 | struct ftrace_event_call *call = &tp->call; | 825 | struct ftrace_event_call *call = &tp->call; |
@@ -830,7 +831,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
830 | 831 | ||
831 | size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); | 832 | size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); |
832 | 833 | ||
833 | event = trace_current_buffer_lock_reserve(call->id, size, | 834 | event = trace_current_buffer_lock_reserve(&buffer, call->id, size, |
834 | irq_flags, pc); | 835 | irq_flags, pc); |
835 | if (!event) | 836 | if (!event) |
836 | return 0; | 837 | return 0; |
@@ -841,8 +842,8 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
841 | for (i = 0; i < tp->nr_args; i++) | 842 | for (i = 0; i < tp->nr_args; i++) |
842 | entry->args[i] = call_fetch(&tp->args[i], regs); | 843 | entry->args[i] = call_fetch(&tp->args[i], regs); |
843 | 844 | ||
844 | if (!filter_current_check_discard(call, entry, event)) | 845 | if (!filter_current_check_discard(buffer, call, entry, event)) |
845 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); | 846 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); |
846 | return 0; | 847 | return 0; |
847 | } | 848 | } |
848 | 849 | ||
@@ -853,6 +854,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, | |||
853 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | 854 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); |
854 | struct kretprobe_trace_entry *entry; | 855 | struct kretprobe_trace_entry *entry; |
855 | struct ring_buffer_event *event; | 856 | struct ring_buffer_event *event; |
857 | struct ring_buffer *buffer; | ||
856 | int size, i, pc; | 858 | int size, i, pc; |
857 | unsigned long irq_flags; | 859 | unsigned long irq_flags; |
858 | struct ftrace_event_call *call = &tp->call; | 860 | struct ftrace_event_call *call = &tp->call; |
@@ -862,7 +864,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, | |||
862 | 864 | ||
863 | size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); | 865 | size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); |
864 | 866 | ||
865 | event = trace_current_buffer_lock_reserve(call->id, size, | 867 | event = trace_current_buffer_lock_reserve(&buffer, call->id, size, |
866 | irq_flags, pc); | 868 | irq_flags, pc); |
867 | if (!event) | 869 | if (!event) |
868 | return 0; | 870 | return 0; |
@@ -874,8 +876,8 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, | |||
874 | for (i = 0; i < tp->nr_args; i++) | 876 | for (i = 0; i < tp->nr_args; i++) |
875 | entry->args[i] = call_fetch(&tp->args[i], regs); | 877 | entry->args[i] = call_fetch(&tp->args[i], regs); |
876 | 878 | ||
877 | if (!filter_current_check_discard(call, entry, event)) | 879 | if (!filter_current_check_discard(buffer, call, entry, event)) |
878 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); | 880 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); |
879 | 881 | ||
880 | return 0; | 882 | return 0; |
881 | } | 883 | } |
@@ -964,7 +966,7 @@ static void probe_event_disable(struct ftrace_event_call *call) | |||
964 | static int probe_event_raw_init(struct ftrace_event_call *event_call) | 966 | static int probe_event_raw_init(struct ftrace_event_call *event_call) |
965 | { | 967 | { |
966 | INIT_LIST_HEAD(&event_call->fields); | 968 | INIT_LIST_HEAD(&event_call->fields); |
967 | init_preds(event_call); | 969 | |
968 | return 0; | 970 | return 0; |
969 | } | 971 | } |
970 | 972 | ||
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index d53b45ed0806..c4c9bbda53d3 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -307,11 +307,12 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
307 | struct trace_array_cpu *data, | 307 | struct trace_array_cpu *data, |
308 | struct mmiotrace_rw *rw) | 308 | struct mmiotrace_rw *rw) |
309 | { | 309 | { |
310 | struct ring_buffer *buffer = tr->buffer; | ||
310 | struct ring_buffer_event *event; | 311 | struct ring_buffer_event *event; |
311 | struct trace_mmiotrace_rw *entry; | 312 | struct trace_mmiotrace_rw *entry; |
312 | int pc = preempt_count(); | 313 | int pc = preempt_count(); |
313 | 314 | ||
314 | event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW, | 315 | event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW, |
315 | sizeof(*entry), 0, pc); | 316 | sizeof(*entry), 0, pc); |
316 | if (!event) { | 317 | if (!event) { |
317 | atomic_inc(&dropped_count); | 318 | atomic_inc(&dropped_count); |
@@ -319,7 +320,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
319 | } | 320 | } |
320 | entry = ring_buffer_event_data(event); | 321 | entry = ring_buffer_event_data(event); |
321 | entry->rw = *rw; | 322 | entry->rw = *rw; |
322 | trace_buffer_unlock_commit(tr, event, 0, pc); | 323 | trace_buffer_unlock_commit(buffer, event, 0, pc); |
323 | } | 324 | } |
324 | 325 | ||
325 | void mmio_trace_rw(struct mmiotrace_rw *rw) | 326 | void mmio_trace_rw(struct mmiotrace_rw *rw) |
@@ -333,11 +334,12 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
333 | struct trace_array_cpu *data, | 334 | struct trace_array_cpu *data, |
334 | struct mmiotrace_map *map) | 335 | struct mmiotrace_map *map) |
335 | { | 336 | { |
337 | struct ring_buffer *buffer = tr->buffer; | ||
336 | struct ring_buffer_event *event; | 338 | struct ring_buffer_event *event; |
337 | struct trace_mmiotrace_map *entry; | 339 | struct trace_mmiotrace_map *entry; |
338 | int pc = preempt_count(); | 340 | int pc = preempt_count(); |
339 | 341 | ||
340 | event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP, | 342 | event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP, |
341 | sizeof(*entry), 0, pc); | 343 | sizeof(*entry), 0, pc); |
342 | if (!event) { | 344 | if (!event) { |
343 | atomic_inc(&dropped_count); | 345 | atomic_inc(&dropped_count); |
@@ -345,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
345 | } | 347 | } |
346 | entry = ring_buffer_event_data(event); | 348 | entry = ring_buffer_event_data(event); |
347 | entry->map = *map; | 349 | entry->map = *map; |
348 | trace_buffer_unlock_commit(tr, event, 0, pc); | 350 | trace_buffer_unlock_commit(buffer, event, 0, pc); |
349 | } | 351 | } |
350 | 352 | ||
351 | void mmio_trace_mapping(struct mmiotrace_map *map) | 353 | void mmio_trace_mapping(struct mmiotrace_map *map) |
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c index 8a30d9874cd4..fe1a00f1445a 100644 --- a/kernel/trace/trace_power.c +++ b/kernel/trace/trace_power.c | |||
@@ -38,6 +38,7 @@ static void probe_power_end(struct power_trace *it) | |||
38 | { | 38 | { |
39 | struct ftrace_event_call *call = &event_power; | 39 | struct ftrace_event_call *call = &event_power; |
40 | struct ring_buffer_event *event; | 40 | struct ring_buffer_event *event; |
41 | struct ring_buffer *buffer; | ||
41 | struct trace_power *entry; | 42 | struct trace_power *entry; |
42 | struct trace_array_cpu *data; | 43 | struct trace_array_cpu *data; |
43 | struct trace_array *tr = power_trace; | 44 | struct trace_array *tr = power_trace; |
@@ -45,18 +46,20 @@ static void probe_power_end(struct power_trace *it) | |||
45 | if (!trace_power_enabled) | 46 | if (!trace_power_enabled) |
46 | return; | 47 | return; |
47 | 48 | ||
49 | buffer = tr->buffer; | ||
50 | |||
48 | preempt_disable(); | 51 | preempt_disable(); |
49 | it->end = ktime_get(); | 52 | it->end = ktime_get(); |
50 | data = tr->data[smp_processor_id()]; | 53 | data = tr->data[smp_processor_id()]; |
51 | 54 | ||
52 | event = trace_buffer_lock_reserve(tr, TRACE_POWER, | 55 | event = trace_buffer_lock_reserve(buffer, TRACE_POWER, |
53 | sizeof(*entry), 0, 0); | 56 | sizeof(*entry), 0, 0); |
54 | if (!event) | 57 | if (!event) |
55 | goto out; | 58 | goto out; |
56 | entry = ring_buffer_event_data(event); | 59 | entry = ring_buffer_event_data(event); |
57 | entry->state_data = *it; | 60 | entry->state_data = *it; |
58 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 61 | if (!filter_check_discard(call, entry, buffer, event)) |
59 | trace_buffer_unlock_commit(tr, event, 0, 0); | 62 | trace_buffer_unlock_commit(buffer, event, 0, 0); |
60 | out: | 63 | out: |
61 | preempt_enable(); | 64 | preempt_enable(); |
62 | } | 65 | } |
@@ -66,6 +69,7 @@ static void probe_power_mark(struct power_trace *it, unsigned int type, | |||
66 | { | 69 | { |
67 | struct ftrace_event_call *call = &event_power; | 70 | struct ftrace_event_call *call = &event_power; |
68 | struct ring_buffer_event *event; | 71 | struct ring_buffer_event *event; |
72 | struct ring_buffer *buffer; | ||
69 | struct trace_power *entry; | 73 | struct trace_power *entry; |
70 | struct trace_array_cpu *data; | 74 | struct trace_array_cpu *data; |
71 | struct trace_array *tr = power_trace; | 75 | struct trace_array *tr = power_trace; |
@@ -73,6 +77,8 @@ static void probe_power_mark(struct power_trace *it, unsigned int type, | |||
73 | if (!trace_power_enabled) | 77 | if (!trace_power_enabled) |
74 | return; | 78 | return; |
75 | 79 | ||
80 | buffer = tr->buffer; | ||
81 | |||
76 | memset(it, 0, sizeof(struct power_trace)); | 82 | memset(it, 0, sizeof(struct power_trace)); |
77 | it->state = level; | 83 | it->state = level; |
78 | it->type = type; | 84 | it->type = type; |
@@ -81,14 +87,14 @@ static void probe_power_mark(struct power_trace *it, unsigned int type, | |||
81 | it->end = it->stamp; | 87 | it->end = it->stamp; |
82 | data = tr->data[smp_processor_id()]; | 88 | data = tr->data[smp_processor_id()]; |
83 | 89 | ||
84 | event = trace_buffer_lock_reserve(tr, TRACE_POWER, | 90 | event = trace_buffer_lock_reserve(buffer, TRACE_POWER, |
85 | sizeof(*entry), 0, 0); | 91 | sizeof(*entry), 0, 0); |
86 | if (!event) | 92 | if (!event) |
87 | goto out; | 93 | goto out; |
88 | entry = ring_buffer_event_data(event); | 94 | entry = ring_buffer_event_data(event); |
89 | entry->state_data = *it; | 95 | entry->state_data = *it; |
90 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 96 | if (!filter_check_discard(call, entry, buffer, event)) |
91 | trace_buffer_unlock_commit(tr, event, 0, 0); | 97 | trace_buffer_unlock_commit(buffer, event, 0, 0); |
92 | out: | 98 | out: |
93 | preempt_enable(); | 99 | preempt_enable(); |
94 | } | 100 | } |
@@ -144,14 +150,12 @@ static void power_trace_reset(struct trace_array *tr) | |||
144 | 150 | ||
145 | static int power_trace_init(struct trace_array *tr) | 151 | static int power_trace_init(struct trace_array *tr) |
146 | { | 152 | { |
147 | int cpu; | ||
148 | power_trace = tr; | 153 | power_trace = tr; |
149 | 154 | ||
150 | trace_power_enabled = 1; | 155 | trace_power_enabled = 1; |
151 | tracing_power_register(); | 156 | tracing_power_register(); |
152 | 157 | ||
153 | for_each_cpu(cpu, cpu_possible_mask) | 158 | tracing_reset_online_cpus(tr); |
154 | tracing_reset(tr, cpu); | ||
155 | return 0; | 159 | return 0; |
156 | } | 160 | } |
157 | 161 | ||
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index e1285d7b5488..5fca0f51fde4 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -28,10 +28,11 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
28 | unsigned long flags, int pc) | 28 | unsigned long flags, int pc) |
29 | { | 29 | { |
30 | struct ftrace_event_call *call = &event_context_switch; | 30 | struct ftrace_event_call *call = &event_context_switch; |
31 | struct ring_buffer *buffer = tr->buffer; | ||
31 | struct ring_buffer_event *event; | 32 | struct ring_buffer_event *event; |
32 | struct ctx_switch_entry *entry; | 33 | struct ctx_switch_entry *entry; |
33 | 34 | ||
34 | event = trace_buffer_lock_reserve(tr, TRACE_CTX, | 35 | event = trace_buffer_lock_reserve(buffer, TRACE_CTX, |
35 | sizeof(*entry), flags, pc); | 36 | sizeof(*entry), flags, pc); |
36 | if (!event) | 37 | if (!event) |
37 | return; | 38 | return; |
@@ -44,8 +45,8 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
44 | entry->next_state = next->state; | 45 | entry->next_state = next->state; |
45 | entry->next_cpu = task_cpu(next); | 46 | entry->next_cpu = task_cpu(next); |
46 | 47 | ||
47 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 48 | if (!filter_check_discard(call, entry, buffer, event)) |
48 | trace_buffer_unlock_commit(tr, event, flags, pc); | 49 | trace_buffer_unlock_commit(buffer, event, flags, pc); |
49 | } | 50 | } |
50 | 51 | ||
51 | static void | 52 | static void |
@@ -86,8 +87,9 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
86 | struct ftrace_event_call *call = &event_wakeup; | 87 | struct ftrace_event_call *call = &event_wakeup; |
87 | struct ring_buffer_event *event; | 88 | struct ring_buffer_event *event; |
88 | struct ctx_switch_entry *entry; | 89 | struct ctx_switch_entry *entry; |
90 | struct ring_buffer *buffer = tr->buffer; | ||
89 | 91 | ||
90 | event = trace_buffer_lock_reserve(tr, TRACE_WAKE, | 92 | event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, |
91 | sizeof(*entry), flags, pc); | 93 | sizeof(*entry), flags, pc); |
92 | if (!event) | 94 | if (!event) |
93 | return; | 95 | return; |
@@ -100,10 +102,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
100 | entry->next_state = wakee->state; | 102 | entry->next_state = wakee->state; |
101 | entry->next_cpu = task_cpu(wakee); | 103 | entry->next_cpu = task_cpu(wakee); |
102 | 104 | ||
103 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 105 | if (!filter_check_discard(call, entry, buffer, event)) |
104 | ring_buffer_unlock_commit(tr->buffer, event); | 106 | ring_buffer_unlock_commit(buffer, event); |
105 | ftrace_trace_stack(tr, flags, 6, pc); | 107 | ftrace_trace_stack(tr->buffer, flags, 6, pc); |
106 | ftrace_trace_userstack(tr, flags, pc); | 108 | ftrace_trace_userstack(tr->buffer, flags, pc); |
107 | } | 109 | } |
108 | 110 | ||
109 | static void | 111 | static void |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index eacb27225173..ad69f105a7c6 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -186,11 +186,6 @@ out: | |||
186 | 186 | ||
187 | static void __wakeup_reset(struct trace_array *tr) | 187 | static void __wakeup_reset(struct trace_array *tr) |
188 | { | 188 | { |
189 | int cpu; | ||
190 | |||
191 | for_each_possible_cpu(cpu) | ||
192 | tracing_reset(tr, cpu); | ||
193 | |||
194 | wakeup_cpu = -1; | 189 | wakeup_cpu = -1; |
195 | wakeup_prio = -1; | 190 | wakeup_prio = -1; |
196 | 191 | ||
@@ -204,6 +199,8 @@ static void wakeup_reset(struct trace_array *tr) | |||
204 | { | 199 | { |
205 | unsigned long flags; | 200 | unsigned long flags; |
206 | 201 | ||
202 | tracing_reset_online_cpus(tr); | ||
203 | |||
207 | local_irq_save(flags); | 204 | local_irq_save(flags); |
208 | __raw_spin_lock(&wakeup_lock); | 205 | __raw_spin_lock(&wakeup_lock); |
209 | __wakeup_reset(tr); | 206 | __wakeup_reset(tr); |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index e7c676e50a7f..dfc55fed2099 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -11,8 +11,8 @@ | |||
11 | static DEFINE_MUTEX(syscall_trace_lock); | 11 | static DEFINE_MUTEX(syscall_trace_lock); |
12 | static int sys_refcount_enter; | 12 | static int sys_refcount_enter; |
13 | static int sys_refcount_exit; | 13 | static int sys_refcount_exit; |
14 | static DECLARE_BITMAP(enabled_enter_syscalls, FTRACE_SYSCALL_MAX); | 14 | static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); |
15 | static DECLARE_BITMAP(enabled_exit_syscalls, FTRACE_SYSCALL_MAX); | 15 | static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); |
16 | 16 | ||
17 | enum print_line_t | 17 | enum print_line_t |
18 | print_syscall_enter(struct trace_iterator *iter, int flags) | 18 | print_syscall_enter(struct trace_iterator *iter, int flags) |
@@ -223,10 +223,13 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id) | |||
223 | struct syscall_trace_enter *entry; | 223 | struct syscall_trace_enter *entry; |
224 | struct syscall_metadata *sys_data; | 224 | struct syscall_metadata *sys_data; |
225 | struct ring_buffer_event *event; | 225 | struct ring_buffer_event *event; |
226 | struct ring_buffer *buffer; | ||
226 | int size; | 227 | int size; |
227 | int syscall_nr; | 228 | int syscall_nr; |
228 | 229 | ||
229 | syscall_nr = syscall_get_nr(current, regs); | 230 | syscall_nr = syscall_get_nr(current, regs); |
231 | if (syscall_nr < 0) | ||
232 | return; | ||
230 | if (!test_bit(syscall_nr, enabled_enter_syscalls)) | 233 | if (!test_bit(syscall_nr, enabled_enter_syscalls)) |
231 | return; | 234 | return; |
232 | 235 | ||
@@ -236,8 +239,8 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id) | |||
236 | 239 | ||
237 | size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; | 240 | size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; |
238 | 241 | ||
239 | event = trace_current_buffer_lock_reserve(sys_data->enter_id, size, | 242 | event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id, |
240 | 0, 0); | 243 | size, 0, 0); |
241 | if (!event) | 244 | if (!event) |
242 | return; | 245 | return; |
243 | 246 | ||
@@ -245,8 +248,9 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id) | |||
245 | entry->nr = syscall_nr; | 248 | entry->nr = syscall_nr; |
246 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); | 249 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); |
247 | 250 | ||
248 | if (!filter_current_check_discard(sys_data->enter_event, entry, event)) | 251 | if (!filter_current_check_discard(buffer, sys_data->enter_event, |
249 | trace_current_buffer_unlock_commit(event, 0, 0); | 252 | entry, event)) |
253 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); | ||
250 | } | 254 | } |
251 | 255 | ||
252 | void ftrace_syscall_exit(struct pt_regs *regs, long ret) | 256 | void ftrace_syscall_exit(struct pt_regs *regs, long ret) |
@@ -254,9 +258,12 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret) | |||
254 | struct syscall_trace_exit *entry; | 258 | struct syscall_trace_exit *entry; |
255 | struct syscall_metadata *sys_data; | 259 | struct syscall_metadata *sys_data; |
256 | struct ring_buffer_event *event; | 260 | struct ring_buffer_event *event; |
261 | struct ring_buffer *buffer; | ||
257 | int syscall_nr; | 262 | int syscall_nr; |
258 | 263 | ||
259 | syscall_nr = syscall_get_nr(current, regs); | 264 | syscall_nr = syscall_get_nr(current, regs); |
265 | if (syscall_nr < 0) | ||
266 | return; | ||
260 | if (!test_bit(syscall_nr, enabled_exit_syscalls)) | 267 | if (!test_bit(syscall_nr, enabled_exit_syscalls)) |
261 | return; | 268 | return; |
262 | 269 | ||
@@ -264,7 +271,7 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret) | |||
264 | if (!sys_data) | 271 | if (!sys_data) |
265 | return; | 272 | return; |
266 | 273 | ||
267 | event = trace_current_buffer_lock_reserve(sys_data->exit_id, | 274 | event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id, |
268 | sizeof(*entry), 0, 0); | 275 | sizeof(*entry), 0, 0); |
269 | if (!event) | 276 | if (!event) |
270 | return; | 277 | return; |
@@ -273,8 +280,9 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret) | |||
273 | entry->nr = syscall_nr; | 280 | entry->nr = syscall_nr; |
274 | entry->ret = syscall_get_return_value(current, regs); | 281 | entry->ret = syscall_get_return_value(current, regs); |
275 | 282 | ||
276 | if (!filter_current_check_discard(sys_data->exit_event, entry, event)) | 283 | if (!filter_current_check_discard(buffer, sys_data->exit_event, |
277 | trace_current_buffer_unlock_commit(event, 0, 0); | 284 | entry, event)) |
285 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); | ||
278 | } | 286 | } |
279 | 287 | ||
280 | int reg_event_syscall_enter(struct ftrace_event_call *call) | 288 | int reg_event_syscall_enter(struct ftrace_event_call *call) |
@@ -285,7 +293,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) | |||
285 | 293 | ||
286 | name = (char *)call->data; | 294 | name = (char *)call->data; |
287 | num = syscall_name_to_nr(name); | 295 | num = syscall_name_to_nr(name); |
288 | if (num < 0 || num >= FTRACE_SYSCALL_MAX) | 296 | if (num < 0 || num >= NR_syscalls) |
289 | return -ENOSYS; | 297 | return -ENOSYS; |
290 | mutex_lock(&syscall_trace_lock); | 298 | mutex_lock(&syscall_trace_lock); |
291 | if (!sys_refcount_enter) | 299 | if (!sys_refcount_enter) |
@@ -308,7 +316,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call) | |||
308 | 316 | ||
309 | name = (char *)call->data; | 317 | name = (char *)call->data; |
310 | num = syscall_name_to_nr(name); | 318 | num = syscall_name_to_nr(name); |
311 | if (num < 0 || num >= FTRACE_SYSCALL_MAX) | 319 | if (num < 0 || num >= NR_syscalls) |
312 | return; | 320 | return; |
313 | mutex_lock(&syscall_trace_lock); | 321 | mutex_lock(&syscall_trace_lock); |
314 | sys_refcount_enter--; | 322 | sys_refcount_enter--; |
@@ -326,7 +334,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) | |||
326 | 334 | ||
327 | name = call->data; | 335 | name = call->data; |
328 | num = syscall_name_to_nr(name); | 336 | num = syscall_name_to_nr(name); |
329 | if (num < 0 || num >= FTRACE_SYSCALL_MAX) | 337 | if (num < 0 || num >= NR_syscalls) |
330 | return -ENOSYS; | 338 | return -ENOSYS; |
331 | mutex_lock(&syscall_trace_lock); | 339 | mutex_lock(&syscall_trace_lock); |
332 | if (!sys_refcount_exit) | 340 | if (!sys_refcount_exit) |
@@ -349,7 +357,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call) | |||
349 | 357 | ||
350 | name = call->data; | 358 | name = call->data; |
351 | num = syscall_name_to_nr(name); | 359 | num = syscall_name_to_nr(name); |
352 | if (num < 0 || num >= FTRACE_SYSCALL_MAX) | 360 | if (num < 0 || num >= NR_syscalls) |
353 | return; | 361 | return; |
354 | mutex_lock(&syscall_trace_lock); | 362 | mutex_lock(&syscall_trace_lock); |
355 | sys_refcount_exit--; | 363 | sys_refcount_exit--; |
@@ -369,8 +377,8 @@ struct trace_event event_syscall_exit = { | |||
369 | 377 | ||
370 | #ifdef CONFIG_EVENT_PROFILE | 378 | #ifdef CONFIG_EVENT_PROFILE |
371 | 379 | ||
372 | static DECLARE_BITMAP(enabled_prof_enter_syscalls, FTRACE_SYSCALL_MAX); | 380 | static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); |
373 | static DECLARE_BITMAP(enabled_prof_exit_syscalls, FTRACE_SYSCALL_MAX); | 381 | static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); |
374 | static int sys_prof_refcount_enter; | 382 | static int sys_prof_refcount_enter; |
375 | static int sys_prof_refcount_exit; | 383 | static int sys_prof_refcount_exit; |
376 | 384 | ||
@@ -416,7 +424,7 @@ int reg_prof_syscall_enter(char *name) | |||
416 | int num; | 424 | int num; |
417 | 425 | ||
418 | num = syscall_name_to_nr(name); | 426 | num = syscall_name_to_nr(name); |
419 | if (num < 0 || num >= FTRACE_SYSCALL_MAX) | 427 | if (num < 0 || num >= NR_syscalls) |
420 | return -ENOSYS; | 428 | return -ENOSYS; |
421 | 429 | ||
422 | mutex_lock(&syscall_trace_lock); | 430 | mutex_lock(&syscall_trace_lock); |
@@ -438,7 +446,7 @@ void unreg_prof_syscall_enter(char *name) | |||
438 | int num; | 446 | int num; |
439 | 447 | ||
440 | num = syscall_name_to_nr(name); | 448 | num = syscall_name_to_nr(name); |
441 | if (num < 0 || num >= FTRACE_SYSCALL_MAX) | 449 | if (num < 0 || num >= NR_syscalls) |
442 | return; | 450 | return; |
443 | 451 | ||
444 | mutex_lock(&syscall_trace_lock); | 452 | mutex_lock(&syscall_trace_lock); |
@@ -477,7 +485,7 @@ int reg_prof_syscall_exit(char *name) | |||
477 | int num; | 485 | int num; |
478 | 486 | ||
479 | num = syscall_name_to_nr(name); | 487 | num = syscall_name_to_nr(name); |
480 | if (num < 0 || num >= FTRACE_SYSCALL_MAX) | 488 | if (num < 0 || num >= NR_syscalls) |
481 | return -ENOSYS; | 489 | return -ENOSYS; |
482 | 490 | ||
483 | mutex_lock(&syscall_trace_lock); | 491 | mutex_lock(&syscall_trace_lock); |
@@ -499,7 +507,7 @@ void unreg_prof_syscall_exit(char *name) | |||
499 | int num; | 507 | int num; |
500 | 508 | ||
501 | num = syscall_name_to_nr(name); | 509 | num = syscall_name_to_nr(name); |
502 | if (num < 0 || num >= FTRACE_SYSCALL_MAX) | 510 | if (num < 0 || num >= NR_syscalls) |
503 | return; | 511 | return; |
504 | 512 | ||
505 | mutex_lock(&syscall_trace_lock); | 513 | mutex_lock(&syscall_trace_lock); |
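
The syscall bitmaps are now sized by NR_syscalls, and every registration path bounds-checks the resolved syscall number before touching a bitmap. A condensed sketch of that check, assuming it lives in trace_syscalls.c so the bitmaps and lock above are in scope; the helper name is illustrative, and refcounting plus the actual tracepoint registration are elided:

/*
 * Condensed registration sketch: validate the syscall number against
 * NR_syscalls before setting its bit in the enabled bitmap.
 */
static int example_enable_syscall_enter(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;		/* unknown or out-of-range syscall */

	mutex_lock(&syscall_trace_lock);
	set_bit(num, enabled_enter_syscalls);
	mutex_unlock(&syscall_trace_lock);

	return 0;
}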