author | Sage Weil <sage@inktank.com> | 2013-08-15 14:11:45 -0400
committer | Sage Weil <sage@inktank.com> | 2013-08-15 14:11:45 -0400
commit | ee3e542fec6e69bc9fb668698889a37d93950ddf
tree | e74ee766a4764769ef1d3d45d266b4dea64101d3 /kernel/trace
parent | fe2a801b50c0bb8039d627e5ae1fec249d10ff39
parent | f1d6e17f540af37bb1891480143669ba7636c4cf
Merge remote-tracking branch 'linus/master' into testing
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/ftrace.c | 143
-rw-r--r-- | kernel/trace/ring_buffer.c | 26
-rw-r--r-- | kernel/trace/trace.c | 496
-rw-r--r-- | kernel/trace/trace.h | 35
-rw-r--r-- | kernel/trace/trace_event_perf.c | 10
-rw-r--r-- | kernel/trace/trace_events.c | 406
-rw-r--r-- | kernel/trace/trace_events_filter.c | 27
-rw-r--r-- | kernel/trace/trace_functions.c | 105
-rw-r--r-- | kernel/trace/trace_functions_graph.c | 54
-rw-r--r-- | kernel/trace/trace_irqsoff.c | 4
-rw-r--r-- | kernel/trace/trace_kprobe.c | 230
-rw-r--r-- | kernel/trace/trace_mmiotrace.c | 8
-rw-r--r-- | kernel/trace/trace_output.c | 14
-rw-r--r-- | kernel/trace/trace_selftest.c | 18
-rw-r--r-- | kernel/trace/trace_syscalls.c | 47
-rw-r--r-- | kernel/trace/trace_uprobe.c | 57
16 files changed, 1089 insertions, 591 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6c508ff33c62..a6d098c6df3f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -413,6 +413,17 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
413 | return 0; | 413 | return 0; |
414 | } | 414 | } |
415 | 415 | ||
416 | static void ftrace_sync(struct work_struct *work) | ||
417 | { | ||
418 | /* | ||
419 | * This function is just a stub to implement a hard force | ||
420 | * of synchronize_sched(). This requires synchronizing | ||
421 | * tasks even in userspace and idle. | ||
422 | * | ||
423 | * Yes, function tracing is rude. | ||
424 | */ | ||
425 | } | ||
426 | |||
416 | static int __unregister_ftrace_function(struct ftrace_ops *ops) | 427 | static int __unregister_ftrace_function(struct ftrace_ops *ops) |
417 | { | 428 | { |
418 | int ret; | 429 | int ret; |
@@ -440,8 +451,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
440 | * so there'll be no new users. We must ensure | 451 | * so there'll be no new users. We must ensure |
441 | * all current users are done before we free | 452 | * all current users are done before we free |
442 | * the control data. | 453 | * the control data. |
454 | * Note synchronize_sched() is not enough, as we | ||
455 | * use preempt_disable() to do RCU, but the function | ||
456 | * tracer can be called where RCU is not active | ||
457 | * (before user_exit()). | ||
443 | */ | 458 | */ |
444 | synchronize_sched(); | 459 | schedule_on_each_cpu(ftrace_sync); |
445 | control_ops_free(ops); | 460 | control_ops_free(ops); |
446 | } | 461 | } |
447 | } else | 462 | } else |
@@ -456,9 +471,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
456 | /* | 471 | /* |
457 | * Dynamic ops may be freed, we must make sure that all | 472 | * Dynamic ops may be freed, we must make sure that all |
458 | * callers are done before leaving this function. | 473 | * callers are done before leaving this function. |
474 | * | ||
475 | * Again, normal synchronize_sched() is not good enough. | ||
476 | * We need to do a hard force of sched synchronization. | ||
459 | */ | 477 | */ |
460 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) | 478 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) |
461 | synchronize_sched(); | 479 | schedule_on_each_cpu(ftrace_sync); |
480 | |||
462 | 481 | ||
463 | return 0; | 482 | return 0; |
464 | } | 483 | } |
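The two hunks above replace synchronize_sched() with schedule_on_each_cpu(ftrace_sync) because, as the added comments explain, function-tracer callbacks can run where RCU is not active (for example before user_exit()), so an ordinary RCU-sched grace period is not a strong enough barrier before freeing the ops. Below is a minimal userspace sketch of the guarantee being relied on; the mutex scheme and all names are illustrative only and are not how schedule_on_each_cpu() is implemented:

```c
/*
 * Illustration only: the property needed above is "every callback that was
 * already running when the sync started has finished by the time it
 * returns".  Here each worker holds its own mutex while running a callback,
 * so taking and dropping every worker's mutex acts as a hard grace period.
 */
#include <pthread.h>

#define NR_WORKERS 4

static pthread_mutex_t worker_lock[NR_WORKERS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Workers wrap every callback invocation with their own lock. */
void worker_run_callback(int worker, void (*cb)(void))
{
	pthread_mutex_lock(&worker_lock[worker]);
	cb();
	pthread_mutex_unlock(&worker_lock[worker]);
}

/* Returns only after all in-flight callbacks have completed. */
void hard_sync(void)
{
	for (int worker = 0; worker < NR_WORKERS; worker++) {
		pthread_mutex_lock(&worker_lock[worker]);
		pthread_mutex_unlock(&worker_lock[worker]);
	}
}

int main(void)
{
	hard_sync();
	return 0;
}
```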
@@ -622,12 +641,18 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
622 | if (rec->counter <= 1) | 641 | if (rec->counter <= 1) |
623 | stddev = 0; | 642 | stddev = 0; |
624 | else { | 643 | else { |
625 | stddev = rec->time_squared - rec->counter * avg * avg; | 644 | /* |
645 | * Apply Welford's method: | ||
646 | * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) | ||
647 | */ | ||
648 | stddev = rec->counter * rec->time_squared - | ||
649 | rec->time * rec->time; | ||
650 | |||
626 | /* | 651 | /* |
627 | * Divide only 1000 for ns^2 -> us^2 conversion. | 652 | * Divide only 1000 for ns^2 -> us^2 conversion. |
628 | * trace_print_graph_duration will divide 1000 again. | 653 | * trace_print_graph_duration will divide 1000 again. |
629 | */ | 654 | */ |
630 | do_div(stddev, (rec->counter - 1) * 1000); | 655 | do_div(stddev, rec->counter * (rec->counter - 1) * 1000); |
631 | } | 656 | } |
632 | 657 | ||
633 | trace_seq_init(&s); | 658 | trace_seq_init(&s); |
@@ -1416,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, | |||
1416 | * the hashes are freed with call_rcu_sched(). | 1441 | * the hashes are freed with call_rcu_sched(). |
1417 | */ | 1442 | */ |
1418 | static int | 1443 | static int |
1419 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | 1444 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
1420 | { | 1445 | { |
1421 | struct ftrace_hash *filter_hash; | 1446 | struct ftrace_hash *filter_hash; |
1422 | struct ftrace_hash *notrace_hash; | 1447 | struct ftrace_hash *notrace_hash; |
1423 | int ret; | 1448 | int ret; |
1424 | 1449 | ||
1450 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS | ||
1451 | /* | ||
1452 | * There's a small race when adding ops that the ftrace handler | ||
1453 | * that wants regs, may be called without them. We can not | ||
1454 | * allow that handler to be called if regs is NULL. | ||
1455 | */ | ||
1456 | if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) | ||
1457 | return 0; | ||
1458 | #endif | ||
1459 | |||
1425 | filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); | 1460 | filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); |
1426 | notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); | 1461 | notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); |
1427 | 1462 | ||
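The new guard above skips any ops that demands saved registers whenever the caller could not provide them, closing the race window described in the added comment. A small self-contained sketch of that check follows; the struct and flag names are stand-ins, not the kernel definitions:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define OPS_FL_SAVE_REGS	(1u << 0)

struct ops { unsigned int flags; };

static bool ops_may_run(const struct ops *ops, const void *regs)
{
	/* A handler that insists on registers is skipped when none exist. */
	if (regs == NULL && (ops->flags & OPS_FL_SAVE_REGS))
		return false;
	return true;
}

int main(void)
{
	struct ops wants_regs = { .flags = OPS_FL_SAVE_REGS };
	struct ops plain = { .flags = 0 };
	int fake_regs;

	printf("%d %d %d\n",
	       ops_may_run(&wants_regs, NULL),		/* 0: skipped */
	       ops_may_run(&wants_regs, &fake_regs),	/* 1: runs */
	       ops_may_run(&plain, NULL));		/* 1: runs */
	return 0;
}
```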
@@ -2134,12 +2169,57 @@ static cycle_t ftrace_update_time; | |||
2134 | static unsigned long ftrace_update_cnt; | 2169 | static unsigned long ftrace_update_cnt; |
2135 | unsigned long ftrace_update_tot_cnt; | 2170 | unsigned long ftrace_update_tot_cnt; |
2136 | 2171 | ||
2137 | static int ops_traces_mod(struct ftrace_ops *ops) | 2172 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
2138 | { | 2173 | { |
2139 | struct ftrace_hash *hash; | 2174 | /* |
2175 | * Filter_hash being empty will default to trace module. | ||
2176 | * But notrace hash requires a test of individual module functions. | ||
2177 | */ | ||
2178 | return ftrace_hash_empty(ops->filter_hash) && | ||
2179 | ftrace_hash_empty(ops->notrace_hash); | ||
2180 | } | ||
2181 | |||
2182 | /* | ||
2183 | * Check if the current ops references the record. | ||
2184 | * | ||
2185 | * If the ops traces all functions, then it was already accounted for. | ||
2186 | * If the ops does not trace the current record function, skip it. | ||
2187 | * If the ops ignores the function via notrace filter, skip it. | ||
2188 | */ | ||
2189 | static inline bool | ||
2190 | ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) | ||
2191 | { | ||
2192 | /* If ops isn't enabled, ignore it */ | ||
2193 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | ||
2194 | return 0; | ||
2140 | 2195 | ||
2141 | hash = ops->filter_hash; | 2196 | /* If ops traces all mods, we already accounted for it */ |
2142 | return ftrace_hash_empty(hash); | 2197 | if (ops_traces_mod(ops)) |
2198 | return 0; | ||
2199 | |||
2200 | /* The function must be in the filter */ | ||
2201 | if (!ftrace_hash_empty(ops->filter_hash) && | ||
2202 | !ftrace_lookup_ip(ops->filter_hash, rec->ip)) | ||
2203 | return 0; | ||
2204 | |||
2205 | /* If in notrace hash, we ignore it too */ | ||
2206 | if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) | ||
2207 | return 0; | ||
2208 | |||
2209 | return 1; | ||
2210 | } | ||
2211 | |||
2212 | static int referenced_filters(struct dyn_ftrace *rec) | ||
2213 | { | ||
2214 | struct ftrace_ops *ops; | ||
2215 | int cnt = 0; | ||
2216 | |||
2217 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { | ||
2218 | if (ops_references_rec(ops, rec)) | ||
2219 | cnt++; | ||
2220 | } | ||
2221 | |||
2222 | return cnt; | ||
2143 | } | 2223 | } |
2144 | 2224 | ||
2145 | static int ftrace_update_code(struct module *mod) | 2225 | static int ftrace_update_code(struct module *mod) |
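ops_references_rec() above decides whether an enabled ops would trace a given module function: an ops with both hashes empty traces everything (and was already counted once), a non-empty filter hash must contain the function, and the notrace hash always excludes it. The userspace sketch below walks through the same decision, using plain arrays as stand-ins for the ftrace hashes (all names hypothetical):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_ops {
	const unsigned long *filter;	/* empty means "trace everything" */
	size_t filter_len;
	const unsigned long *notrace;
	size_t notrace_len;
};

static bool in_set(const unsigned long *set, size_t len, unsigned long ip)
{
	for (size_t i = 0; i < len; i++)
		if (set[i] == ip)
			return true;
	return false;
}

static bool ops_references_ip(const struct fake_ops *ops, unsigned long ip)
{
	/* Both hashes empty: traces every module function, counted already. */
	if (ops->filter_len == 0 && ops->notrace_len == 0)
		return false;
	/* A non-empty filter must contain the function. */
	if (ops->filter_len && !in_set(ops->filter, ops->filter_len, ip))
		return false;
	/* The notrace hash always excludes. */
	if (in_set(ops->notrace, ops->notrace_len, ip))
		return false;
	return true;
}

int main(void)
{
	unsigned long filter[] = { 0x1000, 0x2000 };
	unsigned long notrace[] = { 0x2000 };
	struct fake_ops ops = { filter, 2, notrace, 1 };

	printf("%d %d %d\n",
	       ops_references_ip(&ops, 0x1000),		/* 1: in the filter */
	       ops_references_ip(&ops, 0x2000),		/* 0: notrace wins */
	       ops_references_ip(&ops, 0x3000));	/* 0: not filtered in */
	return 0;
}
```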
@@ -2148,6 +2228,7 @@ static int ftrace_update_code(struct module *mod) | |||
2148 | struct dyn_ftrace *p; | 2228 | struct dyn_ftrace *p; |
2149 | cycle_t start, stop; | 2229 | cycle_t start, stop; |
2150 | unsigned long ref = 0; | 2230 | unsigned long ref = 0; |
2231 | bool test = false; | ||
2151 | int i; | 2232 | int i; |
2152 | 2233 | ||
2153 | /* | 2234 | /* |
@@ -2161,9 +2242,12 @@ static int ftrace_update_code(struct module *mod) | |||
2161 | 2242 | ||
2162 | for (ops = ftrace_ops_list; | 2243 | for (ops = ftrace_ops_list; |
2163 | ops != &ftrace_list_end; ops = ops->next) { | 2244 | ops != &ftrace_list_end; ops = ops->next) { |
2164 | if (ops->flags & FTRACE_OPS_FL_ENABLED && | 2245 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { |
2165 | ops_traces_mod(ops)) | 2246 | if (ops_traces_mod(ops)) |
2166 | ref++; | 2247 | ref++; |
2248 | else | ||
2249 | test = true; | ||
2250 | } | ||
2167 | } | 2251 | } |
2168 | } | 2252 | } |
2169 | 2253 | ||
@@ -2173,12 +2257,16 @@ static int ftrace_update_code(struct module *mod) | |||
2173 | for (pg = ftrace_new_pgs; pg; pg = pg->next) { | 2257 | for (pg = ftrace_new_pgs; pg; pg = pg->next) { |
2174 | 2258 | ||
2175 | for (i = 0; i < pg->index; i++) { | 2259 | for (i = 0; i < pg->index; i++) { |
2260 | int cnt = ref; | ||
2261 | |||
2176 | /* If something went wrong, bail without enabling anything */ | 2262 | /* If something went wrong, bail without enabling anything */ |
2177 | if (unlikely(ftrace_disabled)) | 2263 | if (unlikely(ftrace_disabled)) |
2178 | return -1; | 2264 | return -1; |
2179 | 2265 | ||
2180 | p = &pg->records[i]; | 2266 | p = &pg->records[i]; |
2181 | p->flags = ref; | 2267 | if (test) |
2268 | cnt += referenced_filters(p); | ||
2269 | p->flags = cnt; | ||
2182 | 2270 | ||
2183 | /* | 2271 | /* |
2184 | * Do the initial record conversion from mcount jump | 2272 | * Do the initial record conversion from mcount jump |
@@ -2198,7 +2286,7 @@ static int ftrace_update_code(struct module *mod) | |||
2198 | * conversion puts the module to the correct state, thus | 2286 | * conversion puts the module to the correct state, thus |
2199 | * passing the ftrace_make_call check. | 2287 | * passing the ftrace_make_call check. |
2200 | */ | 2288 | */ |
2201 | if (ftrace_start_up && ref) { | 2289 | if (ftrace_start_up && cnt) { |
2202 | int failed = __ftrace_replace_code(p, 1); | 2290 | int failed = __ftrace_replace_code(p, 1); |
2203 | if (failed) | 2291 | if (failed) |
2204 | ftrace_bug(failed, p->ip); | 2292 | ftrace_bug(failed, p->ip); |
@@ -3349,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) | |||
3349 | return add_hash_entry(hash, ip); | 3437 | return add_hash_entry(hash, ip); |
3350 | } | 3438 | } |
3351 | 3439 | ||
3440 | static void ftrace_ops_update_code(struct ftrace_ops *ops) | ||
3441 | { | ||
3442 | if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) | ||
3443 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | ||
3444 | } | ||
3445 | |||
3352 | static int | 3446 | static int |
3353 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | 3447 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, |
3354 | unsigned long ip, int remove, int reset, int enable) | 3448 | unsigned long ip, int remove, int reset, int enable) |
@@ -3391,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
3391 | 3485 | ||
3392 | mutex_lock(&ftrace_lock); | 3486 | mutex_lock(&ftrace_lock); |
3393 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | 3487 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); |
3394 | if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED | 3488 | if (!ret) |
3395 | && ftrace_enabled) | 3489 | ftrace_ops_update_code(ops); |
3396 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | ||
3397 | 3490 | ||
3398 | mutex_unlock(&ftrace_lock); | 3491 | mutex_unlock(&ftrace_lock); |
3399 | 3492 | ||
@@ -3512,8 +3605,12 @@ EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); | |||
3512 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | 3605 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
3513 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; | 3606 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; |
3514 | 3607 | ||
3608 | /* Used by function selftest to not test if filter is set */ | ||
3609 | bool ftrace_filter_param __initdata; | ||
3610 | |||
3515 | static int __init set_ftrace_notrace(char *str) | 3611 | static int __init set_ftrace_notrace(char *str) |
3516 | { | 3612 | { |
3613 | ftrace_filter_param = true; | ||
3517 | strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); | 3614 | strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); |
3518 | return 1; | 3615 | return 1; |
3519 | } | 3616 | } |
@@ -3521,6 +3618,7 @@ __setup("ftrace_notrace=", set_ftrace_notrace); | |||
3521 | 3618 | ||
3522 | static int __init set_ftrace_filter(char *str) | 3619 | static int __init set_ftrace_filter(char *str) |
3523 | { | 3620 | { |
3621 | ftrace_filter_param = true; | ||
3524 | strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); | 3622 | strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); |
3525 | return 1; | 3623 | return 1; |
3526 | } | 3624 | } |
@@ -3615,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
3615 | mutex_lock(&ftrace_lock); | 3713 | mutex_lock(&ftrace_lock); |
3616 | ret = ftrace_hash_move(iter->ops, filter_hash, | 3714 | ret = ftrace_hash_move(iter->ops, filter_hash, |
3617 | orig_hash, iter->hash); | 3715 | orig_hash, iter->hash); |
3618 | if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED) | 3716 | if (!ret) |
3619 | && ftrace_enabled) | 3717 | ftrace_ops_update_code(iter->ops); |
3620 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | ||
3621 | 3718 | ||
3622 | mutex_unlock(&ftrace_lock); | 3719 | mutex_unlock(&ftrace_lock); |
3623 | } | 3720 | } |
@@ -4188,7 +4285,7 @@ static inline void ftrace_startup_enable(int command) { } | |||
4188 | # define ftrace_shutdown_sysctl() do { } while (0) | 4285 | # define ftrace_shutdown_sysctl() do { } while (0) |
4189 | 4286 | ||
4190 | static inline int | 4287 | static inline int |
4191 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | 4288 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
4192 | { | 4289 | { |
4193 | return 1; | 4290 | return 1; |
4194 | } | 4291 | } |
@@ -4211,7 +4308,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | |||
4211 | do_for_each_ftrace_op(op, ftrace_control_list) { | 4308 | do_for_each_ftrace_op(op, ftrace_control_list) { |
4212 | if (!(op->flags & FTRACE_OPS_FL_STUB) && | 4309 | if (!(op->flags & FTRACE_OPS_FL_STUB) && |
4213 | !ftrace_function_local_disabled(op) && | 4310 | !ftrace_function_local_disabled(op) && |
4214 | ftrace_ops_test(op, ip)) | 4311 | ftrace_ops_test(op, ip, regs)) |
4215 | op->func(ip, parent_ip, op, regs); | 4312 | op->func(ip, parent_ip, op, regs); |
4216 | } while_for_each_ftrace_op(op); | 4313 | } while_for_each_ftrace_op(op); |
4217 | trace_recursion_clear(TRACE_CONTROL_BIT); | 4314 | trace_recursion_clear(TRACE_CONTROL_BIT); |
@@ -4244,7 +4341,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
4244 | */ | 4341 | */ |
4245 | preempt_disable_notrace(); | 4342 | preempt_disable_notrace(); |
4246 | do_for_each_ftrace_op(op, ftrace_ops_list) { | 4343 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
4247 | if (ftrace_ops_test(op, ip)) | 4344 | if (ftrace_ops_test(op, ip, regs)) |
4248 | op->func(ip, parent_ip, op, regs); | 4345 | op->func(ip, parent_ip, op, regs); |
4249 | } while_for_each_ftrace_op(op); | 4346 | } while_for_each_ftrace_op(op); |
4250 | preempt_enable_notrace(); | 4347 | preempt_enable_notrace(); |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e444ff88f0a4..cc2f66f68dc5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -36,11 +36,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s) | |||
36 | { | 36 | { |
37 | int ret; | 37 | int ret; |
38 | 38 | ||
39 | ret = trace_seq_printf(s, "# compressed entry header\n"); | 39 | ret = trace_seq_puts(s, "# compressed entry header\n"); |
40 | ret = trace_seq_printf(s, "\ttype_len : 5 bits\n"); | 40 | ret = trace_seq_puts(s, "\ttype_len : 5 bits\n"); |
41 | ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n"); | 41 | ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n"); |
42 | ret = trace_seq_printf(s, "\tarray : 32 bits\n"); | 42 | ret = trace_seq_puts(s, "\tarray : 32 bits\n"); |
43 | ret = trace_seq_printf(s, "\n"); | 43 | ret = trace_seq_putc(s, '\n'); |
44 | ret = trace_seq_printf(s, "\tpadding : type == %d\n", | 44 | ret = trace_seq_printf(s, "\tpadding : type == %d\n", |
45 | RINGBUF_TYPE_PADDING); | 45 | RINGBUF_TYPE_PADDING); |
46 | ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", | 46 | ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", |
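The hunk above switches constant strings from trace_seq_printf() to trace_seq_puts() and single characters to trace_seq_putc(), so fixed text no longer pays for format parsing. The stdio analogy below shows the same idea; it is only an analogy, trace_seq has its own API:

```c
#include <stdio.h>

int main(void)
{
	fputs("# compressed entry header\n", stdout);	/* was a printf-style call */
	fputc('\n', stdout);				/* single character */
	fprintf(stdout, "\tpadding : type == %d\n", 29);	/* still needs a format */
	return 0;
}
```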
@@ -1066,7 +1066,7 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, | |||
1066 | } | 1066 | } |
1067 | 1067 | ||
1068 | /** | 1068 | /** |
1069 | * check_pages - integrity check of buffer pages | 1069 | * rb_check_pages - integrity check of buffer pages |
1070 | * @cpu_buffer: CPU buffer with pages to test | 1070 | * @cpu_buffer: CPU buffer with pages to test |
1071 | * | 1071 | * |
1072 | * As a safety measure we check to make sure the data pages have not | 1072 | * As a safety measure we check to make sure the data pages have not |
@@ -1258,7 +1258,7 @@ static int rb_cpu_notify(struct notifier_block *self, | |||
1258 | #endif | 1258 | #endif |
1259 | 1259 | ||
1260 | /** | 1260 | /** |
1261 | * ring_buffer_alloc - allocate a new ring_buffer | 1261 | * __ring_buffer_alloc - allocate a new ring_buffer |
1262 | * @size: the size in bytes per cpu that is needed. | 1262 | * @size: the size in bytes per cpu that is needed. |
1263 | * @flags: attributes to set for the ring buffer. | 1263 | * @flags: attributes to set for the ring buffer. |
1264 | * | 1264 | * |
@@ -1607,6 +1607,7 @@ static void update_pages_handler(struct work_struct *work) | |||
1607 | * ring_buffer_resize - resize the ring buffer | 1607 | * ring_buffer_resize - resize the ring buffer |
1608 | * @buffer: the buffer to resize. | 1608 | * @buffer: the buffer to resize. |
1609 | * @size: the new size. | 1609 | * @size: the new size. |
1610 | * @cpu_id: the cpu buffer to resize | ||
1610 | * | 1611 | * |
1611 | * Minimum size is 2 * BUF_PAGE_SIZE. | 1612 | * Minimum size is 2 * BUF_PAGE_SIZE. |
1612 | * | 1613 | * |
@@ -3956,11 +3957,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume); | |||
3956 | * expected. | 3957 | * expected. |
3957 | * | 3958 | * |
3958 | * After a sequence of ring_buffer_read_prepare calls, the user is | 3959 | * After a sequence of ring_buffer_read_prepare calls, the user is |
3959 | * expected to make at least one call to ring_buffer_prepare_sync. | 3960 | * expected to make at least one call to ring_buffer_read_prepare_sync. |
3960 | * Afterwards, ring_buffer_read_start is invoked to get things going | 3961 | * Afterwards, ring_buffer_read_start is invoked to get things going |
3961 | * for real. | 3962 | * for real. |
3962 | * | 3963 | * |
3963 | * This overall must be paired with ring_buffer_finish. | 3964 | * This overall must be paired with ring_buffer_read_finish. |
3964 | */ | 3965 | */ |
3965 | struct ring_buffer_iter * | 3966 | struct ring_buffer_iter * |
3966 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) | 3967 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) |
@@ -4009,7 +4010,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); | |||
4009 | * an intervening ring_buffer_read_prepare_sync must have been | 4010 | * an intervening ring_buffer_read_prepare_sync must have been |
4010 | * performed. | 4011 | * performed. |
4011 | * | 4012 | * |
4012 | * Must be paired with ring_buffer_finish. | 4013 | * Must be paired with ring_buffer_read_finish. |
4013 | */ | 4014 | */ |
4014 | void | 4015 | void |
4015 | ring_buffer_read_start(struct ring_buffer_iter *iter) | 4016 | ring_buffer_read_start(struct ring_buffer_iter *iter) |
@@ -4031,7 +4032,7 @@ ring_buffer_read_start(struct ring_buffer_iter *iter) | |||
4031 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); | 4032 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); |
4032 | 4033 | ||
4033 | /** | 4034 | /** |
4034 | * ring_buffer_finish - finish reading the iterator of the buffer | 4035 | * ring_buffer_read_finish - finish reading the iterator of the buffer |
4035 | * @iter: The iterator retrieved by ring_buffer_start | 4036 | * @iter: The iterator retrieved by ring_buffer_start |
4036 | * | 4037 | * |
4037 | * This re-enables the recording to the buffer, and frees the | 4038 | * This re-enables the recording to the buffer, and frees the |
@@ -4346,6 +4347,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | |||
4346 | /** | 4347 | /** |
4347 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | 4348 | * ring_buffer_alloc_read_page - allocate a page to read from buffer |
4348 | * @buffer: the buffer to allocate for. | 4349 | * @buffer: the buffer to allocate for. |
4350 | * @cpu: the cpu buffer to allocate. | ||
4349 | * | 4351 | * |
4350 | * This function is used in conjunction with ring_buffer_read_page. | 4352 | * This function is used in conjunction with ring_buffer_read_page. |
4351 | * When reading a full page from the ring buffer, these functions | 4353 | * When reading a full page from the ring buffer, these functions |
@@ -4403,7 +4405,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); | |||
4403 | * to swap with a page in the ring buffer. | 4405 | * to swap with a page in the ring buffer. |
4404 | * | 4406 | * |
4405 | * for example: | 4407 | * for example: |
4406 | * rpage = ring_buffer_alloc_read_page(buffer); | 4408 | * rpage = ring_buffer_alloc_read_page(buffer, cpu); |
4407 | * if (!rpage) | 4409 | * if (!rpage) |
4408 | * return error; | 4410 | * return error; |
4409 | * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); | 4411 | * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e71a8be4a6ee..496f94d57698 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -115,6 +115,9 @@ cpumask_var_t __read_mostly tracing_buffer_mask; | |||
115 | 115 | ||
116 | enum ftrace_dump_mode ftrace_dump_on_oops; | 116 | enum ftrace_dump_mode ftrace_dump_on_oops; |
117 | 117 | ||
118 | /* When set, tracing will stop when a WARN*() is hit */ | ||
119 | int __disable_trace_on_warning; | ||
120 | |||
118 | static int tracing_set_tracer(const char *buf); | 121 | static int tracing_set_tracer(const char *buf); |
119 | 122 | ||
120 | #define MAX_TRACER_SIZE 100 | 123 | #define MAX_TRACER_SIZE 100 |
@@ -149,6 +152,13 @@ static int __init set_ftrace_dump_on_oops(char *str) | |||
149 | } | 152 | } |
150 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | 153 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); |
151 | 154 | ||
155 | static int __init stop_trace_on_warning(char *str) | ||
156 | { | ||
157 | __disable_trace_on_warning = 1; | ||
158 | return 1; | ||
159 | } | ||
160 | __setup("traceoff_on_warning=", stop_trace_on_warning); | ||
161 | |||
152 | static int __init boot_alloc_snapshot(char *str) | 162 | static int __init boot_alloc_snapshot(char *str) |
153 | { | 163 | { |
154 | allocate_snapshot = true; | 164 | allocate_snapshot = true; |
@@ -170,6 +180,7 @@ static int __init set_trace_boot_options(char *str) | |||
170 | } | 180 | } |
171 | __setup("trace_options=", set_trace_boot_options); | 181 | __setup("trace_options=", set_trace_boot_options); |
172 | 182 | ||
183 | |||
173 | unsigned long long ns2usecs(cycle_t nsec) | 184 | unsigned long long ns2usecs(cycle_t nsec) |
174 | { | 185 | { |
175 | nsec += 500; | 186 | nsec += 500; |
@@ -193,6 +204,37 @@ static struct trace_array global_trace; | |||
193 | 204 | ||
194 | LIST_HEAD(ftrace_trace_arrays); | 205 | LIST_HEAD(ftrace_trace_arrays); |
195 | 206 | ||
207 | int trace_array_get(struct trace_array *this_tr) | ||
208 | { | ||
209 | struct trace_array *tr; | ||
210 | int ret = -ENODEV; | ||
211 | |||
212 | mutex_lock(&trace_types_lock); | ||
213 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | ||
214 | if (tr == this_tr) { | ||
215 | tr->ref++; | ||
216 | ret = 0; | ||
217 | break; | ||
218 | } | ||
219 | } | ||
220 | mutex_unlock(&trace_types_lock); | ||
221 | |||
222 | return ret; | ||
223 | } | ||
224 | |||
225 | static void __trace_array_put(struct trace_array *this_tr) | ||
226 | { | ||
227 | WARN_ON(!this_tr->ref); | ||
228 | this_tr->ref--; | ||
229 | } | ||
230 | |||
231 | void trace_array_put(struct trace_array *this_tr) | ||
232 | { | ||
233 | mutex_lock(&trace_types_lock); | ||
234 | __trace_array_put(this_tr); | ||
235 | mutex_unlock(&trace_types_lock); | ||
236 | } | ||
237 | |||
196 | int filter_current_check_discard(struct ring_buffer *buffer, | 238 | int filter_current_check_discard(struct ring_buffer *buffer, |
197 | struct ftrace_event_call *call, void *rec, | 239 | struct ftrace_event_call *call, void *rec, |
198 | struct ring_buffer_event *event) | 240 | struct ring_buffer_event *event) |
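trace_array_get()/trace_array_put() above pin a trace_array by looking it up on ftrace_trace_arrays and bumping tr->ref while trace_types_lock is held, so the array cannot be freed between the lookup and the reference bump. A compact userspace sketch of that pattern follows; a pthread mutex stands in for the kernel mutex and all names are invented:

```c
#include <pthread.h>
#include <stdio.h>

struct tracer {
	struct tracer *next;
	int ref;
};

static pthread_mutex_t types_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tracer *tracer_list;

static int tracer_get(struct tracer *want)
{
	int ret = -1;

	pthread_mutex_lock(&types_lock);
	for (struct tracer *t = tracer_list; t; t = t->next) {
		if (t == want) {	/* still on the list, so still valid */
			t->ref++;
			ret = 0;
			break;
		}
	}
	pthread_mutex_unlock(&types_lock);
	return ret;
}

static void tracer_put(struct tracer *t)
{
	pthread_mutex_lock(&types_lock);
	t->ref--;			/* caller must hold a prior get */
	pthread_mutex_unlock(&types_lock);
}

int main(void)
{
	struct tracer tr = { .next = NULL, .ref = 0 };

	tracer_list = &tr;
	if (tracer_get(&tr) == 0) {
		printf("ref=%d\n", tr.ref);	/* 1 */
		tracer_put(&tr);
	}
	return 0;
}
```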
@@ -201,23 +243,43 @@ int filter_current_check_discard(struct ring_buffer *buffer, | |||
201 | } | 243 | } |
202 | EXPORT_SYMBOL_GPL(filter_current_check_discard); | 244 | EXPORT_SYMBOL_GPL(filter_current_check_discard); |
203 | 245 | ||
204 | cycle_t ftrace_now(int cpu) | 246 | cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) |
205 | { | 247 | { |
206 | u64 ts; | 248 | u64 ts; |
207 | 249 | ||
208 | /* Early boot up does not have a buffer yet */ | 250 | /* Early boot up does not have a buffer yet */ |
209 | if (!global_trace.trace_buffer.buffer) | 251 | if (!buf->buffer) |
210 | return trace_clock_local(); | 252 | return trace_clock_local(); |
211 | 253 | ||
212 | ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu); | 254 | ts = ring_buffer_time_stamp(buf->buffer, cpu); |
213 | ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts); | 255 | ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); |
214 | 256 | ||
215 | return ts; | 257 | return ts; |
216 | } | 258 | } |
217 | 259 | ||
260 | cycle_t ftrace_now(int cpu) | ||
261 | { | ||
262 | return buffer_ftrace_now(&global_trace.trace_buffer, cpu); | ||
263 | } | ||
264 | |||
265 | /** | ||
266 | * tracing_is_enabled - Show if global_trace has been disabled | ||
267 | * | ||
268 | * Shows if the global trace has been enabled or not. It uses the | ||
269 | * mirror flag "buffer_disabled" to be used in fast paths such as for | ||
270 | * the irqsoff tracer. But it may be inaccurate due to races. If you | ||
271 | * need to know the accurate state, use tracing_is_on() which is a little | ||
272 | * slower, but accurate. | ||
273 | */ | ||
218 | int tracing_is_enabled(void) | 274 | int tracing_is_enabled(void) |
219 | { | 275 | { |
220 | return tracing_is_on(); | 276 | /* |
277 | * For quick access (irqsoff uses this in fast path), just | ||
278 | * return the mirror variable of the state of the ring buffer. | ||
279 | * It's a little racy, but we don't really care. | ||
280 | */ | ||
281 | smp_rmb(); | ||
282 | return !global_trace.buffer_disabled; | ||
221 | } | 283 | } |
222 | 284 | ||
223 | /* | 285 | /* |
@@ -240,7 +302,7 @@ static struct tracer *trace_types __read_mostly; | |||
240 | /* | 302 | /* |
241 | * trace_types_lock is used to protect the trace_types list. | 303 | * trace_types_lock is used to protect the trace_types list. |
242 | */ | 304 | */ |
243 | static DEFINE_MUTEX(trace_types_lock); | 305 | DEFINE_MUTEX(trace_types_lock); |
244 | 306 | ||
245 | /* | 307 | /* |
246 | * serialize the access of the ring buffer | 308 | * serialize the access of the ring buffer |
@@ -330,6 +392,23 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | |||
330 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | | 392 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | |
331 | TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION; | 393 | TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION; |
332 | 394 | ||
395 | static void tracer_tracing_on(struct trace_array *tr) | ||
396 | { | ||
397 | if (tr->trace_buffer.buffer) | ||
398 | ring_buffer_record_on(tr->trace_buffer.buffer); | ||
399 | /* | ||
400 | * This flag is looked at when buffers haven't been allocated | ||
401 | * yet, or by some tracers (like irqsoff), that just want to | ||
402 | * know if the ring buffer has been disabled, but it can handle | ||
403 | * races of where it gets disabled but we still do a record. | ||
404 | * As the check is in the fast path of the tracers, it is more | ||
405 | * important to be fast than accurate. | ||
406 | */ | ||
407 | tr->buffer_disabled = 0; | ||
408 | /* Make the flag seen by readers */ | ||
409 | smp_wmb(); | ||
410 | } | ||
411 | |||
333 | /** | 412 | /** |
334 | * tracing_on - enable tracing buffers | 413 | * tracing_on - enable tracing buffers |
335 | * | 414 | * |
@@ -338,15 +417,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | |||
338 | */ | 417 | */ |
339 | void tracing_on(void) | 418 | void tracing_on(void) |
340 | { | 419 | { |
341 | if (global_trace.trace_buffer.buffer) | 420 | tracer_tracing_on(&global_trace); |
342 | ring_buffer_record_on(global_trace.trace_buffer.buffer); | ||
343 | /* | ||
344 | * This flag is only looked at when buffers haven't been | ||
345 | * allocated yet. We don't really care about the race | ||
346 | * between setting this flag and actually turning | ||
347 | * on the buffer. | ||
348 | */ | ||
349 | global_trace.buffer_disabled = 0; | ||
350 | } | 421 | } |
351 | EXPORT_SYMBOL_GPL(tracing_on); | 422 | EXPORT_SYMBOL_GPL(tracing_on); |
352 | 423 | ||
@@ -540,6 +611,23 @@ void tracing_snapshot_alloc(void) | |||
540 | EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); | 611 | EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); |
541 | #endif /* CONFIG_TRACER_SNAPSHOT */ | 612 | #endif /* CONFIG_TRACER_SNAPSHOT */ |
542 | 613 | ||
614 | static void tracer_tracing_off(struct trace_array *tr) | ||
615 | { | ||
616 | if (tr->trace_buffer.buffer) | ||
617 | ring_buffer_record_off(tr->trace_buffer.buffer); | ||
618 | /* | ||
619 | * This flag is looked at when buffers haven't been allocated | ||
620 | * yet, or by some tracers (like irqsoff), that just want to | ||
621 | * know if the ring buffer has been disabled, but it can handle | ||
622 | * races of where it gets disabled but we still do a record. | ||
623 | * As the check is in the fast path of the tracers, it is more | ||
624 | * important to be fast than accurate. | ||
625 | */ | ||
626 | tr->buffer_disabled = 1; | ||
627 | /* Make the flag seen by readers */ | ||
628 | smp_wmb(); | ||
629 | } | ||
630 | |||
543 | /** | 631 | /** |
544 | * tracing_off - turn off tracing buffers | 632 | * tracing_off - turn off tracing buffers |
545 | * | 633 | * |
@@ -550,26 +638,35 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); | |||
550 | */ | 638 | */ |
551 | void tracing_off(void) | 639 | void tracing_off(void) |
552 | { | 640 | { |
553 | if (global_trace.trace_buffer.buffer) | 641 | tracer_tracing_off(&global_trace); |
554 | ring_buffer_record_off(global_trace.trace_buffer.buffer); | ||
555 | /* | ||
556 | * This flag is only looked at when buffers haven't been | ||
557 | * allocated yet. We don't really care about the race | ||
558 | * between setting this flag and actually turning | ||
559 | * on the buffer. | ||
560 | */ | ||
561 | global_trace.buffer_disabled = 1; | ||
562 | } | 642 | } |
563 | EXPORT_SYMBOL_GPL(tracing_off); | 643 | EXPORT_SYMBOL_GPL(tracing_off); |
564 | 644 | ||
645 | void disable_trace_on_warning(void) | ||
646 | { | ||
647 | if (__disable_trace_on_warning) | ||
648 | tracing_off(); | ||
649 | } | ||
650 | |||
651 | /** | ||
652 | * tracer_tracing_is_on - show real state of ring buffer enabled | ||
653 | * @tr : the trace array to know if ring buffer is enabled | ||
654 | * | ||
655 | * Shows real state of the ring buffer if it is enabled or not. | ||
656 | */ | ||
657 | static int tracer_tracing_is_on(struct trace_array *tr) | ||
658 | { | ||
659 | if (tr->trace_buffer.buffer) | ||
660 | return ring_buffer_record_is_on(tr->trace_buffer.buffer); | ||
661 | return !tr->buffer_disabled; | ||
662 | } | ||
663 | |||
565 | /** | 664 | /** |
566 | * tracing_is_on - show state of ring buffers enabled | 665 | * tracing_is_on - show state of ring buffers enabled |
567 | */ | 666 | */ |
568 | int tracing_is_on(void) | 667 | int tracing_is_on(void) |
569 | { | 668 | { |
570 | if (global_trace.trace_buffer.buffer) | 669 | return tracer_tracing_is_on(&global_trace); |
571 | return ring_buffer_record_is_on(global_trace.trace_buffer.buffer); | ||
572 | return !global_trace.buffer_disabled; | ||
573 | } | 670 | } |
574 | EXPORT_SYMBOL_GPL(tracing_is_on); | 671 | EXPORT_SYMBOL_GPL(tracing_is_on); |
575 | 672 | ||
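tracer_tracing_on()/off() and tracing_is_enabled() above use tr->buffer_disabled as a fast, slightly racy mirror of the ring buffer state: writers flip the flag and issue smp_wmb(), fast-path readers issue smp_rmb() and accept possible staleness, while tracing_is_on() asks the ring buffer itself for the accurate answer. The C11 sketch below loosely mirrors that publish/observe pairing in userspace; it is illustrative only and the atomics merely stand in for the kernel barriers:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int buffer_disabled;	/* 0 = recording, 1 = off */

static void tracing_off_fast(void)
{
	atomic_store_explicit(&buffer_disabled, 1, memory_order_release);
}

static int tracing_is_enabled_fast(void)
{
	/* Cheap check for hot paths; the answer may be momentarily stale. */
	return !atomic_load_explicit(&buffer_disabled, memory_order_acquire);
}

int main(void)
{
	printf("%d\n", tracing_is_enabled_fast());	/* 1 */
	tracing_off_fast();
	printf("%d\n", tracing_is_enabled_fast());	/* 0 */
	return 0;
}
```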
@@ -1119,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf) | |||
1119 | /* Make sure all commits have finished */ | 1216 | /* Make sure all commits have finished */ |
1120 | synchronize_sched(); | 1217 | synchronize_sched(); |
1121 | 1218 | ||
1122 | buf->time_start = ftrace_now(buf->cpu); | 1219 | buf->time_start = buffer_ftrace_now(buf, buf->cpu); |
1123 | 1220 | ||
1124 | for_each_online_cpu(cpu) | 1221 | for_each_online_cpu(cpu) |
1125 | ring_buffer_reset_cpu(buffer, cpu); | 1222 | ring_buffer_reset_cpu(buffer, cpu); |
@@ -1127,23 +1224,17 @@ void tracing_reset_online_cpus(struct trace_buffer *buf) | |||
1127 | ring_buffer_record_enable(buffer); | 1224 | ring_buffer_record_enable(buffer); |
1128 | } | 1225 | } |
1129 | 1226 | ||
1130 | void tracing_reset_current(int cpu) | 1227 | /* Must have trace_types_lock held */ |
1131 | { | ||
1132 | tracing_reset(&global_trace.trace_buffer, cpu); | ||
1133 | } | ||
1134 | |||
1135 | void tracing_reset_all_online_cpus(void) | 1228 | void tracing_reset_all_online_cpus(void) |
1136 | { | 1229 | { |
1137 | struct trace_array *tr; | 1230 | struct trace_array *tr; |
1138 | 1231 | ||
1139 | mutex_lock(&trace_types_lock); | ||
1140 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | 1232 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
1141 | tracing_reset_online_cpus(&tr->trace_buffer); | 1233 | tracing_reset_online_cpus(&tr->trace_buffer); |
1142 | #ifdef CONFIG_TRACER_MAX_TRACE | 1234 | #ifdef CONFIG_TRACER_MAX_TRACE |
1143 | tracing_reset_online_cpus(&tr->max_buffer); | 1235 | tracing_reset_online_cpus(&tr->max_buffer); |
1144 | #endif | 1236 | #endif |
1145 | } | 1237 | } |
1146 | mutex_unlock(&trace_types_lock); | ||
1147 | } | 1238 | } |
1148 | 1239 | ||
1149 | #define SAVED_CMDLINES 128 | 1240 | #define SAVED_CMDLINES 128 |
@@ -1543,15 +1634,6 @@ trace_function(struct trace_array *tr, | |||
1543 | __buffer_unlock_commit(buffer, event); | 1634 | __buffer_unlock_commit(buffer, event); |
1544 | } | 1635 | } |
1545 | 1636 | ||
1546 | void | ||
1547 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | ||
1548 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | ||
1549 | int pc) | ||
1550 | { | ||
1551 | if (likely(!atomic_read(&data->disabled))) | ||
1552 | trace_function(tr, ip, parent_ip, flags, pc); | ||
1553 | } | ||
1554 | |||
1555 | #ifdef CONFIG_STACKTRACE | 1637 | #ifdef CONFIG_STACKTRACE |
1556 | 1638 | ||
1557 | #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) | 1639 | #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) |
@@ -2760,6 +2842,17 @@ static int s_show(struct seq_file *m, void *v) | |||
2760 | return 0; | 2842 | return 0; |
2761 | } | 2843 | } |
2762 | 2844 | ||
2845 | /* | ||
2846 | * Should be used after trace_array_get(), trace_types_lock | ||
2847 | * ensures that i_cdev was already initialized. | ||
2848 | */ | ||
2849 | static inline int tracing_get_cpu(struct inode *inode) | ||
2850 | { | ||
2851 | if (inode->i_cdev) /* See trace_create_cpu_file() */ | ||
2852 | return (long)inode->i_cdev - 1; | ||
2853 | return RING_BUFFER_ALL_CPUS; | ||
2854 | } | ||
2855 | |||
2763 | static const struct seq_operations tracer_seq_ops = { | 2856 | static const struct seq_operations tracer_seq_ops = { |
2764 | .start = s_start, | 2857 | .start = s_start, |
2765 | .next = s_next, | 2858 | .next = s_next, |
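tracing_get_cpu() above recovers the target CPU from inode->i_cdev: per-CPU files store cpu + 1 there (so CPU 0 stays distinguishable from an unset NULL), and an unset field means the whole buffer. A small sketch of that encoding follows; the struct and constant are stand-ins for the kernel types:

```c
#include <stdio.h>

#define ALL_CPUS	(-1L)

struct fake_inode { void *i_cdev; };

static void file_set_cpu(struct fake_inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);	/* +1 keeps CPU 0 distinct from NULL */
}

static long file_get_cpu(const struct fake_inode *inode)
{
	if (inode->i_cdev)			/* see file_set_cpu() */
		return (long)inode->i_cdev - 1;
	return ALL_CPUS;			/* unset: the whole buffer */
}

int main(void)
{
	struct fake_inode per_cpu0 = { 0 }, global = { 0 };

	file_set_cpu(&per_cpu0, 0);
	printf("%ld %ld\n", file_get_cpu(&per_cpu0), file_get_cpu(&global)); /* 0 -1 */
	return 0;
}
```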
@@ -2770,8 +2863,7 @@ static const struct seq_operations tracer_seq_ops = { | |||
2770 | static struct trace_iterator * | 2863 | static struct trace_iterator * |
2771 | __tracing_open(struct inode *inode, struct file *file, bool snapshot) | 2864 | __tracing_open(struct inode *inode, struct file *file, bool snapshot) |
2772 | { | 2865 | { |
2773 | struct trace_cpu *tc = inode->i_private; | 2866 | struct trace_array *tr = inode->i_private; |
2774 | struct trace_array *tr = tc->tr; | ||
2775 | struct trace_iterator *iter; | 2867 | struct trace_iterator *iter; |
2776 | int cpu; | 2868 | int cpu; |
2777 | 2869 | ||
@@ -2812,8 +2904,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) | |||
2812 | iter->trace_buffer = &tr->trace_buffer; | 2904 | iter->trace_buffer = &tr->trace_buffer; |
2813 | iter->snapshot = snapshot; | 2905 | iter->snapshot = snapshot; |
2814 | iter->pos = -1; | 2906 | iter->pos = -1; |
2907 | iter->cpu_file = tracing_get_cpu(inode); | ||
2815 | mutex_init(&iter->mutex); | 2908 | mutex_init(&iter->mutex); |
2816 | iter->cpu_file = tc->cpu; | ||
2817 | 2909 | ||
2818 | /* Notify the tracer early; before we stop tracing. */ | 2910 | /* Notify the tracer early; before we stop tracing. */ |
2819 | if (iter->trace && iter->trace->open) | 2911 | if (iter->trace && iter->trace->open) |
@@ -2850,8 +2942,6 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) | |||
2850 | tracing_iter_reset(iter, cpu); | 2942 | tracing_iter_reset(iter, cpu); |
2851 | } | 2943 | } |
2852 | 2944 | ||
2853 | tr->ref++; | ||
2854 | |||
2855 | mutex_unlock(&trace_types_lock); | 2945 | mutex_unlock(&trace_types_lock); |
2856 | 2946 | ||
2857 | return iter; | 2947 | return iter; |
@@ -2874,24 +2964,41 @@ int tracing_open_generic(struct inode *inode, struct file *filp) | |||
2874 | return 0; | 2964 | return 0; |
2875 | } | 2965 | } |
2876 | 2966 | ||
2967 | /* | ||
2968 | * Open and update trace_array ref count. | ||
2969 | * Must have the current trace_array passed to it. | ||
2970 | */ | ||
2971 | static int tracing_open_generic_tr(struct inode *inode, struct file *filp) | ||
2972 | { | ||
2973 | struct trace_array *tr = inode->i_private; | ||
2974 | |||
2975 | if (tracing_disabled) | ||
2976 | return -ENODEV; | ||
2977 | |||
2978 | if (trace_array_get(tr) < 0) | ||
2979 | return -ENODEV; | ||
2980 | |||
2981 | filp->private_data = inode->i_private; | ||
2982 | |||
2983 | return 0; | ||
2984 | } | ||
2985 | |||
2877 | static int tracing_release(struct inode *inode, struct file *file) | 2986 | static int tracing_release(struct inode *inode, struct file *file) |
2878 | { | 2987 | { |
2988 | struct trace_array *tr = inode->i_private; | ||
2879 | struct seq_file *m = file->private_data; | 2989 | struct seq_file *m = file->private_data; |
2880 | struct trace_iterator *iter; | 2990 | struct trace_iterator *iter; |
2881 | struct trace_array *tr; | ||
2882 | int cpu; | 2991 | int cpu; |
2883 | 2992 | ||
2884 | if (!(file->f_mode & FMODE_READ)) | 2993 | if (!(file->f_mode & FMODE_READ)) { |
2994 | trace_array_put(tr); | ||
2885 | return 0; | 2995 | return 0; |
2996 | } | ||
2886 | 2997 | ||
2998 | /* Writes do not use seq_file */ | ||
2887 | iter = m->private; | 2999 | iter = m->private; |
2888 | tr = iter->tr; | ||
2889 | |||
2890 | mutex_lock(&trace_types_lock); | 3000 | mutex_lock(&trace_types_lock); |
2891 | 3001 | ||
2892 | WARN_ON(!tr->ref); | ||
2893 | tr->ref--; | ||
2894 | |||
2895 | for_each_tracing_cpu(cpu) { | 3002 | for_each_tracing_cpu(cpu) { |
2896 | if (iter->buffer_iter[cpu]) | 3003 | if (iter->buffer_iter[cpu]) |
2897 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | 3004 | ring_buffer_read_finish(iter->buffer_iter[cpu]); |
@@ -2903,6 +3010,9 @@ static int tracing_release(struct inode *inode, struct file *file) | |||
2903 | if (!iter->snapshot) | 3010 | if (!iter->snapshot) |
2904 | /* reenable tracing if it was previously enabled */ | 3011 | /* reenable tracing if it was previously enabled */ |
2905 | tracing_start_tr(tr); | 3012 | tracing_start_tr(tr); |
3013 | |||
3014 | __trace_array_put(tr); | ||
3015 | |||
2906 | mutex_unlock(&trace_types_lock); | 3016 | mutex_unlock(&trace_types_lock); |
2907 | 3017 | ||
2908 | mutex_destroy(&iter->mutex); | 3018 | mutex_destroy(&iter->mutex); |
@@ -2910,24 +3020,44 @@ static int tracing_release(struct inode *inode, struct file *file) | |||
2910 | kfree(iter->trace); | 3020 | kfree(iter->trace); |
2911 | kfree(iter->buffer_iter); | 3021 | kfree(iter->buffer_iter); |
2912 | seq_release_private(inode, file); | 3022 | seq_release_private(inode, file); |
3023 | |||
3024 | return 0; | ||
3025 | } | ||
3026 | |||
3027 | static int tracing_release_generic_tr(struct inode *inode, struct file *file) | ||
3028 | { | ||
3029 | struct trace_array *tr = inode->i_private; | ||
3030 | |||
3031 | trace_array_put(tr); | ||
2913 | return 0; | 3032 | return 0; |
2914 | } | 3033 | } |
2915 | 3034 | ||
3035 | static int tracing_single_release_tr(struct inode *inode, struct file *file) | ||
3036 | { | ||
3037 | struct trace_array *tr = inode->i_private; | ||
3038 | |||
3039 | trace_array_put(tr); | ||
3040 | |||
3041 | return single_release(inode, file); | ||
3042 | } | ||
3043 | |||
2916 | static int tracing_open(struct inode *inode, struct file *file) | 3044 | static int tracing_open(struct inode *inode, struct file *file) |
2917 | { | 3045 | { |
3046 | struct trace_array *tr = inode->i_private; | ||
2918 | struct trace_iterator *iter; | 3047 | struct trace_iterator *iter; |
2919 | int ret = 0; | 3048 | int ret = 0; |
2920 | 3049 | ||
3050 | if (trace_array_get(tr) < 0) | ||
3051 | return -ENODEV; | ||
3052 | |||
2921 | /* If this file was open for write, then erase contents */ | 3053 | /* If this file was open for write, then erase contents */ |
2922 | if ((file->f_mode & FMODE_WRITE) && | 3054 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { |
2923 | (file->f_flags & O_TRUNC)) { | 3055 | int cpu = tracing_get_cpu(inode); |
2924 | struct trace_cpu *tc = inode->i_private; | ||
2925 | struct trace_array *tr = tc->tr; | ||
2926 | 3056 | ||
2927 | if (tc->cpu == RING_BUFFER_ALL_CPUS) | 3057 | if (cpu == RING_BUFFER_ALL_CPUS) |
2928 | tracing_reset_online_cpus(&tr->trace_buffer); | 3058 | tracing_reset_online_cpus(&tr->trace_buffer); |
2929 | else | 3059 | else |
2930 | tracing_reset(&tr->trace_buffer, tc->cpu); | 3060 | tracing_reset(&tr->trace_buffer, cpu); |
2931 | } | 3061 | } |
2932 | 3062 | ||
2933 | if (file->f_mode & FMODE_READ) { | 3063 | if (file->f_mode & FMODE_READ) { |
@@ -2937,6 +3067,10 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
2937 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) | 3067 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) |
2938 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | 3068 | iter->iter_flags |= TRACE_FILE_LAT_FMT; |
2939 | } | 3069 | } |
3070 | |||
3071 | if (ret < 0) | ||
3072 | trace_array_put(tr); | ||
3073 | |||
2940 | return ret; | 3074 | return ret; |
2941 | } | 3075 | } |
2942 | 3076 | ||
@@ -3293,17 +3427,27 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
3293 | 3427 | ||
3294 | static int tracing_trace_options_open(struct inode *inode, struct file *file) | 3428 | static int tracing_trace_options_open(struct inode *inode, struct file *file) |
3295 | { | 3429 | { |
3430 | struct trace_array *tr = inode->i_private; | ||
3431 | int ret; | ||
3432 | |||
3296 | if (tracing_disabled) | 3433 | if (tracing_disabled) |
3297 | return -ENODEV; | 3434 | return -ENODEV; |
3298 | 3435 | ||
3299 | return single_open(file, tracing_trace_options_show, inode->i_private); | 3436 | if (trace_array_get(tr) < 0) |
3437 | return -ENODEV; | ||
3438 | |||
3439 | ret = single_open(file, tracing_trace_options_show, inode->i_private); | ||
3440 | if (ret < 0) | ||
3441 | trace_array_put(tr); | ||
3442 | |||
3443 | return ret; | ||
3300 | } | 3444 | } |
3301 | 3445 | ||
3302 | static const struct file_operations tracing_iter_fops = { | 3446 | static const struct file_operations tracing_iter_fops = { |
3303 | .open = tracing_trace_options_open, | 3447 | .open = tracing_trace_options_open, |
3304 | .read = seq_read, | 3448 | .read = seq_read, |
3305 | .llseek = seq_lseek, | 3449 | .llseek = seq_lseek, |
3306 | .release = single_release, | 3450 | .release = tracing_single_release_tr, |
3307 | .write = tracing_trace_options_write, | 3451 | .write = tracing_trace_options_write, |
3308 | }; | 3452 | }; |
3309 | 3453 | ||
@@ -3379,14 +3523,14 @@ static const char readme_msg[] = | |||
3379 | "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n" | 3523 | "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n" |
3380 | "\t\t\t Read the contents for more information\n" | 3524 | "\t\t\t Read the contents for more information\n" |
3381 | #endif | 3525 | #endif |
3382 | #ifdef CONFIG_STACKTRACE | 3526 | #ifdef CONFIG_STACK_TRACER |
3383 | " stack_trace\t\t- Shows the max stack trace when active\n" | 3527 | " stack_trace\t\t- Shows the max stack trace when active\n" |
3384 | " stack_max_size\t- Shows current max stack size that was traced\n" | 3528 | " stack_max_size\t- Shows current max stack size that was traced\n" |
3385 | "\t\t\t Write into this file to reset the max size (trigger a new trace)\n" | 3529 | "\t\t\t Write into this file to reset the max size (trigger a new trace)\n" |
3386 | #ifdef CONFIG_DYNAMIC_FTRACE | 3530 | #ifdef CONFIG_DYNAMIC_FTRACE |
3387 | " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n" | 3531 | " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n" |
3388 | #endif | 3532 | #endif |
3389 | #endif /* CONFIG_STACKTRACE */ | 3533 | #endif /* CONFIG_STACK_TRACER */ |
3390 | ; | 3534 | ; |
3391 | 3535 | ||
3392 | static ssize_t | 3536 | static ssize_t |
@@ -3783,20 +3927,23 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf, | |||
3783 | 3927 | ||
3784 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 3928 | static int tracing_open_pipe(struct inode *inode, struct file *filp) |
3785 | { | 3929 | { |
3786 | struct trace_cpu *tc = inode->i_private; | 3930 | struct trace_array *tr = inode->i_private; |
3787 | struct trace_array *tr = tc->tr; | ||
3788 | struct trace_iterator *iter; | 3931 | struct trace_iterator *iter; |
3789 | int ret = 0; | 3932 | int ret = 0; |
3790 | 3933 | ||
3791 | if (tracing_disabled) | 3934 | if (tracing_disabled) |
3792 | return -ENODEV; | 3935 | return -ENODEV; |
3793 | 3936 | ||
3937 | if (trace_array_get(tr) < 0) | ||
3938 | return -ENODEV; | ||
3939 | |||
3794 | mutex_lock(&trace_types_lock); | 3940 | mutex_lock(&trace_types_lock); |
3795 | 3941 | ||
3796 | /* create a buffer to store the information to pass to userspace */ | 3942 | /* create a buffer to store the information to pass to userspace */ |
3797 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 3943 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
3798 | if (!iter) { | 3944 | if (!iter) { |
3799 | ret = -ENOMEM; | 3945 | ret = -ENOMEM; |
3946 | __trace_array_put(tr); | ||
3800 | goto out; | 3947 | goto out; |
3801 | } | 3948 | } |
3802 | 3949 | ||
@@ -3826,9 +3973,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
3826 | if (trace_clocks[tr->clock_id].in_ns) | 3973 | if (trace_clocks[tr->clock_id].in_ns) |
3827 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; | 3974 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; |
3828 | 3975 | ||
3829 | iter->cpu_file = tc->cpu; | 3976 | iter->tr = tr; |
3830 | iter->tr = tc->tr; | 3977 | iter->trace_buffer = &tr->trace_buffer; |
3831 | iter->trace_buffer = &tc->tr->trace_buffer; | 3978 | iter->cpu_file = tracing_get_cpu(inode); |
3832 | mutex_init(&iter->mutex); | 3979 | mutex_init(&iter->mutex); |
3833 | filp->private_data = iter; | 3980 | filp->private_data = iter; |
3834 | 3981 | ||
@@ -3843,6 +3990,7 @@ out: | |||
3843 | fail: | 3990 | fail: |
3844 | kfree(iter->trace); | 3991 | kfree(iter->trace); |
3845 | kfree(iter); | 3992 | kfree(iter); |
3993 | __trace_array_put(tr); | ||
3846 | mutex_unlock(&trace_types_lock); | 3994 | mutex_unlock(&trace_types_lock); |
3847 | return ret; | 3995 | return ret; |
3848 | } | 3996 | } |
@@ -3850,6 +3998,7 @@ fail: | |||
3850 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 3998 | static int tracing_release_pipe(struct inode *inode, struct file *file) |
3851 | { | 3999 | { |
3852 | struct trace_iterator *iter = file->private_data; | 4000 | struct trace_iterator *iter = file->private_data; |
4001 | struct trace_array *tr = inode->i_private; | ||
3853 | 4002 | ||
3854 | mutex_lock(&trace_types_lock); | 4003 | mutex_lock(&trace_types_lock); |
3855 | 4004 | ||
@@ -3863,6 +4012,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) | |||
3863 | kfree(iter->trace); | 4012 | kfree(iter->trace); |
3864 | kfree(iter); | 4013 | kfree(iter); |
3865 | 4014 | ||
4015 | trace_array_put(tr); | ||
4016 | |||
3866 | return 0; | 4017 | return 0; |
3867 | } | 4018 | } |
3868 | 4019 | ||
@@ -3939,7 +4090,7 @@ static int tracing_wait_pipe(struct file *filp) | |||
3939 | * | 4090 | * |
3940 | * iter->pos will be 0 if we haven't read anything. | 4091 | * iter->pos will be 0 if we haven't read anything. |
3941 | */ | 4092 | */ |
3942 | if (!tracing_is_enabled() && iter->pos) | 4093 | if (!tracing_is_on() && iter->pos) |
3943 | break; | 4094 | break; |
3944 | } | 4095 | } |
3945 | 4096 | ||
@@ -4000,6 +4151,7 @@ waitagain: | |||
4000 | memset(&iter->seq, 0, | 4151 | memset(&iter->seq, 0, |
4001 | sizeof(struct trace_iterator) - | 4152 | sizeof(struct trace_iterator) - |
4002 | offsetof(struct trace_iterator, seq)); | 4153 | offsetof(struct trace_iterator, seq)); |
4154 | cpumask_clear(iter->started); | ||
4003 | iter->pos = -1; | 4155 | iter->pos = -1; |
4004 | 4156 | ||
4005 | trace_event_read_lock(); | 4157 | trace_event_read_lock(); |
@@ -4200,15 +4352,16 @@ static ssize_t | |||
4200 | tracing_entries_read(struct file *filp, char __user *ubuf, | 4352 | tracing_entries_read(struct file *filp, char __user *ubuf, |
4201 | size_t cnt, loff_t *ppos) | 4353 | size_t cnt, loff_t *ppos) |
4202 | { | 4354 | { |
4203 | struct trace_cpu *tc = filp->private_data; | 4355 | struct inode *inode = file_inode(filp); |
4204 | struct trace_array *tr = tc->tr; | 4356 | struct trace_array *tr = inode->i_private; |
4357 | int cpu = tracing_get_cpu(inode); | ||
4205 | char buf[64]; | 4358 | char buf[64]; |
4206 | int r = 0; | 4359 | int r = 0; |
4207 | ssize_t ret; | 4360 | ssize_t ret; |
4208 | 4361 | ||
4209 | mutex_lock(&trace_types_lock); | 4362 | mutex_lock(&trace_types_lock); |
4210 | 4363 | ||
4211 | if (tc->cpu == RING_BUFFER_ALL_CPUS) { | 4364 | if (cpu == RING_BUFFER_ALL_CPUS) { |
4212 | int cpu, buf_size_same; | 4365 | int cpu, buf_size_same; |
4213 | unsigned long size; | 4366 | unsigned long size; |
4214 | 4367 | ||
@@ -4235,7 +4388,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf, | |||
4235 | } else | 4388 | } else |
4236 | r = sprintf(buf, "X\n"); | 4389 | r = sprintf(buf, "X\n"); |
4237 | } else | 4390 | } else |
4238 | r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10); | 4391 | r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); |
4239 | 4392 | ||
4240 | mutex_unlock(&trace_types_lock); | 4393 | mutex_unlock(&trace_types_lock); |
4241 | 4394 | ||
@@ -4247,7 +4400,8 @@ static ssize_t | |||
4247 | tracing_entries_write(struct file *filp, const char __user *ubuf, | 4400 | tracing_entries_write(struct file *filp, const char __user *ubuf, |
4248 | size_t cnt, loff_t *ppos) | 4401 | size_t cnt, loff_t *ppos) |
4249 | { | 4402 | { |
4250 | struct trace_cpu *tc = filp->private_data; | 4403 | struct inode *inode = file_inode(filp); |
4404 | struct trace_array *tr = inode->i_private; | ||
4251 | unsigned long val; | 4405 | unsigned long val; |
4252 | int ret; | 4406 | int ret; |
4253 | 4407 | ||
@@ -4261,8 +4415,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
4261 | 4415 | ||
4262 | /* value is in KB */ | 4416 | /* value is in KB */ |
4263 | val <<= 10; | 4417 | val <<= 10; |
4264 | 4418 | ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); | |
4265 | ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu); | ||
4266 | if (ret < 0) | 4419 | if (ret < 0) |
4267 | return ret; | 4420 | return ret; |
4268 | 4421 | ||
@@ -4316,10 +4469,12 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp) | |||
4316 | 4469 | ||
4317 | /* disable tracing ? */ | 4470 | /* disable tracing ? */ |
4318 | if (trace_flags & TRACE_ITER_STOP_ON_FREE) | 4471 | if (trace_flags & TRACE_ITER_STOP_ON_FREE) |
4319 | tracing_off(); | 4472 | tracer_tracing_off(tr); |
4320 | /* resize the ring buffer to 0 */ | 4473 | /* resize the ring buffer to 0 */ |
4321 | tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); | 4474 | tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); |
4322 | 4475 | ||
4476 | trace_array_put(tr); | ||
4477 | |||
4323 | return 0; | 4478 | return 0; |
4324 | } | 4479 | } |
4325 | 4480 | ||
@@ -4328,6 +4483,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
4328 | size_t cnt, loff_t *fpos) | 4483 | size_t cnt, loff_t *fpos) |
4329 | { | 4484 | { |
4330 | unsigned long addr = (unsigned long)ubuf; | 4485 | unsigned long addr = (unsigned long)ubuf; |
4486 | struct trace_array *tr = filp->private_data; | ||
4331 | struct ring_buffer_event *event; | 4487 | struct ring_buffer_event *event; |
4332 | struct ring_buffer *buffer; | 4488 | struct ring_buffer *buffer; |
4333 | struct print_entry *entry; | 4489 | struct print_entry *entry; |
@@ -4387,7 +4543,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
4387 | 4543 | ||
4388 | local_save_flags(irq_flags); | 4544 | local_save_flags(irq_flags); |
4389 | size = sizeof(*entry) + cnt + 2; /* possible \n added */ | 4545 | size = sizeof(*entry) + cnt + 2; /* possible \n added */ |
4390 | buffer = global_trace.trace_buffer.buffer; | 4546 | buffer = tr->trace_buffer.buffer; |
4391 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 4547 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
4392 | irq_flags, preempt_count()); | 4548 | irq_flags, preempt_count()); |
4393 | if (!event) { | 4549 | if (!event) { |
@@ -4478,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
4478 | * New clock may not be consistent with the previous clock. | 4634 | * New clock may not be consistent with the previous clock. |
4479 | * Reset the buffer so that it doesn't have incomparable timestamps. | 4635 | * Reset the buffer so that it doesn't have incomparable timestamps. |
4480 | */ | 4636 | */ |
4481 | tracing_reset_online_cpus(&global_trace.trace_buffer); | 4637 | tracing_reset_online_cpus(&tr->trace_buffer); |
4482 | 4638 | ||
4483 | #ifdef CONFIG_TRACER_MAX_TRACE | 4639 | #ifdef CONFIG_TRACER_MAX_TRACE |
4484 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) | 4640 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) |
4485 | ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); | 4641 | ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); |
4486 | tracing_reset_online_cpus(&global_trace.max_buffer); | 4642 | tracing_reset_online_cpus(&tr->max_buffer); |
4487 | #endif | 4643 | #endif |
4488 | 4644 | ||
4489 | mutex_unlock(&trace_types_lock); | 4645 | mutex_unlock(&trace_types_lock); |
@@ -4495,10 +4651,20 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
4495 | 4651 | ||
4496 | static int tracing_clock_open(struct inode *inode, struct file *file) | 4652 | static int tracing_clock_open(struct inode *inode, struct file *file) |
4497 | { | 4653 | { |
4654 | struct trace_array *tr = inode->i_private; | ||
4655 | int ret; | ||
4656 | |||
4498 | if (tracing_disabled) | 4657 | if (tracing_disabled) |
4499 | return -ENODEV; | 4658 | return -ENODEV; |
4500 | 4659 | ||
4501 | return single_open(file, tracing_clock_show, inode->i_private); | 4660 | if (trace_array_get(tr)) |
4661 | return -ENODEV; | ||
4662 | |||
4663 | ret = single_open(file, tracing_clock_show, inode->i_private); | ||
4664 | if (ret < 0) | ||
4665 | trace_array_put(tr); | ||
4666 | |||
4667 | return ret; | ||
4502 | } | 4668 | } |
4503 | 4669 | ||
4504 | struct ftrace_buffer_info { | 4670 | struct ftrace_buffer_info { |
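The pattern in this hunk recurs through the rest of the diff: ->open() takes a reference on the trace_array up front and drops it again if the open fails, while the matching ->release() (tracing_single_release_tr for this file, per the fops change further down) drops it on close. A minimal userspace sketch of the same acquire/roll-back discipline; the names and error codes are illustrative, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;            /* a real trace_array guards this with a lock */
};

static int obj_get(struct obj *o)
{
	if (o->refcount <= 0)    /* object is already being torn down */
		return -1;
	o->refcount++;
	return 0;
}

static void obj_put(struct obj *o)
{
	o->refcount--;
}

/* ->open(): take the reference first, undo it on every failure path */
static int example_open(struct obj *o, void **private)
{
	void *state;

	if (obj_get(o))
		return -1;               /* -ENODEV in the kernel code */

	state = malloc(64);              /* stands in for single_open() etc. */
	if (!state) {
		obj_put(o);              /* roll the reference back */
		return -1;
	}
	*private = state;
	return 0;
}

/* ->release(): free the per-open state, then drop the reference */
static void example_release(struct obj *o, void *private)
{
	free(private);
	obj_put(o);
}

int main(void)
{
	struct obj o = { .refcount = 1 };
	void *priv;

	if (example_open(&o, &priv) == 0)
		example_release(&o, priv);
	printf("refcount back to %d\n", o.refcount);
	return 0;
}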
@@ -4510,31 +4676,40 @@ struct ftrace_buffer_info { | |||
4510 | #ifdef CONFIG_TRACER_SNAPSHOT | 4676 | #ifdef CONFIG_TRACER_SNAPSHOT |
4511 | static int tracing_snapshot_open(struct inode *inode, struct file *file) | 4677 | static int tracing_snapshot_open(struct inode *inode, struct file *file) |
4512 | { | 4678 | { |
4513 | struct trace_cpu *tc = inode->i_private; | 4679 | struct trace_array *tr = inode->i_private; |
4514 | struct trace_iterator *iter; | 4680 | struct trace_iterator *iter; |
4515 | struct seq_file *m; | 4681 | struct seq_file *m; |
4516 | int ret = 0; | 4682 | int ret = 0; |
4517 | 4683 | ||
4684 | if (trace_array_get(tr) < 0) | ||
4685 | return -ENODEV; | ||
4686 | |||
4518 | if (file->f_mode & FMODE_READ) { | 4687 | if (file->f_mode & FMODE_READ) { |
4519 | iter = __tracing_open(inode, file, true); | 4688 | iter = __tracing_open(inode, file, true); |
4520 | if (IS_ERR(iter)) | 4689 | if (IS_ERR(iter)) |
4521 | ret = PTR_ERR(iter); | 4690 | ret = PTR_ERR(iter); |
4522 | } else { | 4691 | } else { |
4523 | /* Writes still need the seq_file to hold the private data */ | 4692 | /* Writes still need the seq_file to hold the private data */ |
4693 | ret = -ENOMEM; | ||
4524 | m = kzalloc(sizeof(*m), GFP_KERNEL); | 4694 | m = kzalloc(sizeof(*m), GFP_KERNEL); |
4525 | if (!m) | 4695 | if (!m) |
4526 | return -ENOMEM; | 4696 | goto out; |
4527 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 4697 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
4528 | if (!iter) { | 4698 | if (!iter) { |
4529 | kfree(m); | 4699 | kfree(m); |
4530 | return -ENOMEM; | 4700 | goto out; |
4531 | } | 4701 | } |
4532 | iter->tr = tc->tr; | 4702 | ret = 0; |
4533 | iter->trace_buffer = &tc->tr->max_buffer; | 4703 | |
4534 | iter->cpu_file = tc->cpu; | 4704 | iter->tr = tr; |
4705 | iter->trace_buffer = &tr->max_buffer; | ||
4706 | iter->cpu_file = tracing_get_cpu(inode); | ||
4535 | m->private = iter; | 4707 | m->private = iter; |
4536 | file->private_data = m; | 4708 | file->private_data = m; |
4537 | } | 4709 | } |
4710 | out: | ||
4711 | if (ret < 0) | ||
4712 | trace_array_put(tr); | ||
4538 | 4713 | ||
4539 | return ret; | 4714 | return ret; |
4540 | } | 4715 | } |
@@ -4616,9 +4791,12 @@ out: | |||
4616 | static int tracing_snapshot_release(struct inode *inode, struct file *file) | 4791 | static int tracing_snapshot_release(struct inode *inode, struct file *file) |
4617 | { | 4792 | { |
4618 | struct seq_file *m = file->private_data; | 4793 | struct seq_file *m = file->private_data; |
4794 | int ret; | ||
4795 | |||
4796 | ret = tracing_release(inode, file); | ||
4619 | 4797 | ||
4620 | if (file->f_mode & FMODE_READ) | 4798 | if (file->f_mode & FMODE_READ) |
4621 | return tracing_release(inode, file); | 4799 | return ret; |
4622 | 4800 | ||
4623 | /* If write only, the seq_file is just a stub */ | 4801 | /* If write only, the seq_file is just a stub */ |
4624 | if (m) | 4802 | if (m) |
@@ -4684,34 +4862,38 @@ static const struct file_operations tracing_pipe_fops = { | |||
4684 | }; | 4862 | }; |
4685 | 4863 | ||
4686 | static const struct file_operations tracing_entries_fops = { | 4864 | static const struct file_operations tracing_entries_fops = { |
4687 | .open = tracing_open_generic, | 4865 | .open = tracing_open_generic_tr, |
4688 | .read = tracing_entries_read, | 4866 | .read = tracing_entries_read, |
4689 | .write = tracing_entries_write, | 4867 | .write = tracing_entries_write, |
4690 | .llseek = generic_file_llseek, | 4868 | .llseek = generic_file_llseek, |
4869 | .release = tracing_release_generic_tr, | ||
4691 | }; | 4870 | }; |
4692 | 4871 | ||
4693 | static const struct file_operations tracing_total_entries_fops = { | 4872 | static const struct file_operations tracing_total_entries_fops = { |
4694 | .open = tracing_open_generic, | 4873 | .open = tracing_open_generic_tr, |
4695 | .read = tracing_total_entries_read, | 4874 | .read = tracing_total_entries_read, |
4696 | .llseek = generic_file_llseek, | 4875 | .llseek = generic_file_llseek, |
4876 | .release = tracing_release_generic_tr, | ||
4697 | }; | 4877 | }; |
4698 | 4878 | ||
4699 | static const struct file_operations tracing_free_buffer_fops = { | 4879 | static const struct file_operations tracing_free_buffer_fops = { |
4880 | .open = tracing_open_generic_tr, | ||
4700 | .write = tracing_free_buffer_write, | 4881 | .write = tracing_free_buffer_write, |
4701 | .release = tracing_free_buffer_release, | 4882 | .release = tracing_free_buffer_release, |
4702 | }; | 4883 | }; |
4703 | 4884 | ||
4704 | static const struct file_operations tracing_mark_fops = { | 4885 | static const struct file_operations tracing_mark_fops = { |
4705 | .open = tracing_open_generic, | 4886 | .open = tracing_open_generic_tr, |
4706 | .write = tracing_mark_write, | 4887 | .write = tracing_mark_write, |
4707 | .llseek = generic_file_llseek, | 4888 | .llseek = generic_file_llseek, |
4889 | .release = tracing_release_generic_tr, | ||
4708 | }; | 4890 | }; |
4709 | 4891 | ||
4710 | static const struct file_operations trace_clock_fops = { | 4892 | static const struct file_operations trace_clock_fops = { |
4711 | .open = tracing_clock_open, | 4893 | .open = tracing_clock_open, |
4712 | .read = seq_read, | 4894 | .read = seq_read, |
4713 | .llseek = seq_lseek, | 4895 | .llseek = seq_lseek, |
4714 | .release = single_release, | 4896 | .release = tracing_single_release_tr, |
4715 | .write = tracing_clock_write, | 4897 | .write = tracing_clock_write, |
4716 | }; | 4898 | }; |
4717 | 4899 | ||
@@ -4736,23 +4918,26 @@ static const struct file_operations snapshot_raw_fops = { | |||
4736 | 4918 | ||
4737 | static int tracing_buffers_open(struct inode *inode, struct file *filp) | 4919 | static int tracing_buffers_open(struct inode *inode, struct file *filp) |
4738 | { | 4920 | { |
4739 | struct trace_cpu *tc = inode->i_private; | 4921 | struct trace_array *tr = inode->i_private; |
4740 | struct trace_array *tr = tc->tr; | ||
4741 | struct ftrace_buffer_info *info; | 4922 | struct ftrace_buffer_info *info; |
4923 | int ret; | ||
4742 | 4924 | ||
4743 | if (tracing_disabled) | 4925 | if (tracing_disabled) |
4744 | return -ENODEV; | 4926 | return -ENODEV; |
4745 | 4927 | ||
4928 | if (trace_array_get(tr) < 0) | ||
4929 | return -ENODEV; | ||
4930 | |||
4746 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 4931 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
4747 | if (!info) | 4932 | if (!info) { |
4933 | trace_array_put(tr); | ||
4748 | return -ENOMEM; | 4934 | return -ENOMEM; |
4935 | } | ||
4749 | 4936 | ||
4750 | mutex_lock(&trace_types_lock); | 4937 | mutex_lock(&trace_types_lock); |
4751 | 4938 | ||
4752 | tr->ref++; | ||
4753 | |||
4754 | info->iter.tr = tr; | 4939 | info->iter.tr = tr; |
4755 | info->iter.cpu_file = tc->cpu; | 4940 | info->iter.cpu_file = tracing_get_cpu(inode); |
4756 | info->iter.trace = tr->current_trace; | 4941 | info->iter.trace = tr->current_trace; |
4757 | info->iter.trace_buffer = &tr->trace_buffer; | 4942 | info->iter.trace_buffer = &tr->trace_buffer; |
4758 | info->spare = NULL; | 4943 | info->spare = NULL; |
@@ -4763,7 +4948,11 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) | |||
4763 | 4948 | ||
4764 | mutex_unlock(&trace_types_lock); | 4949 | mutex_unlock(&trace_types_lock); |
4765 | 4950 | ||
4766 | return nonseekable_open(inode, filp); | 4951 | ret = nonseekable_open(inode, filp); |
4952 | if (ret < 0) | ||
4953 | trace_array_put(tr); | ||
4954 | |||
4955 | return ret; | ||
4767 | } | 4956 | } |
4768 | 4957 | ||
4769 | static unsigned int | 4958 | static unsigned int |
@@ -4863,8 +5052,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) | |||
4863 | 5052 | ||
4864 | mutex_lock(&trace_types_lock); | 5053 | mutex_lock(&trace_types_lock); |
4865 | 5054 | ||
4866 | WARN_ON(!iter->tr->ref); | 5055 | __trace_array_put(iter->tr); |
4867 | iter->tr->ref--; | ||
4868 | 5056 | ||
4869 | if (info->spare) | 5057 | if (info->spare) |
4870 | ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); | 5058 | ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); |
@@ -5066,14 +5254,14 @@ static ssize_t | |||
5066 | tracing_stats_read(struct file *filp, char __user *ubuf, | 5254 | tracing_stats_read(struct file *filp, char __user *ubuf, |
5067 | size_t count, loff_t *ppos) | 5255 | size_t count, loff_t *ppos) |
5068 | { | 5256 | { |
5069 | struct trace_cpu *tc = filp->private_data; | 5257 | struct inode *inode = file_inode(filp); |
5070 | struct trace_array *tr = tc->tr; | 5258 | struct trace_array *tr = inode->i_private; |
5071 | struct trace_buffer *trace_buf = &tr->trace_buffer; | 5259 | struct trace_buffer *trace_buf = &tr->trace_buffer; |
5260 | int cpu = tracing_get_cpu(inode); | ||
5072 | struct trace_seq *s; | 5261 | struct trace_seq *s; |
5073 | unsigned long cnt; | 5262 | unsigned long cnt; |
5074 | unsigned long long t; | 5263 | unsigned long long t; |
5075 | unsigned long usec_rem; | 5264 | unsigned long usec_rem; |
5076 | int cpu = tc->cpu; | ||
5077 | 5265 | ||
5078 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 5266 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
5079 | if (!s) | 5267 | if (!s) |
@@ -5126,9 +5314,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
5126 | } | 5314 | } |
5127 | 5315 | ||
5128 | static const struct file_operations tracing_stats_fops = { | 5316 | static const struct file_operations tracing_stats_fops = { |
5129 | .open = tracing_open_generic, | 5317 | .open = tracing_open_generic_tr, |
5130 | .read = tracing_stats_read, | 5318 | .read = tracing_stats_read, |
5131 | .llseek = generic_file_llseek, | 5319 | .llseek = generic_file_llseek, |
5320 | .release = tracing_release_generic_tr, | ||
5132 | }; | 5321 | }; |
5133 | 5322 | ||
5134 | #ifdef CONFIG_DYNAMIC_FTRACE | 5323 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -5317,10 +5506,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) | |||
5317 | return tr->percpu_dir; | 5506 | return tr->percpu_dir; |
5318 | } | 5507 | } |
5319 | 5508 | ||
5509 | static struct dentry * | ||
5510 | trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, | ||
5511 | void *data, long cpu, const struct file_operations *fops) | ||
5512 | { | ||
5513 | struct dentry *ret = trace_create_file(name, mode, parent, data, fops); | ||
5514 | |||
5515 | if (ret) /* See tracing_get_cpu() */ | ||
5516 | ret->d_inode->i_cdev = (void *)(cpu + 1); | ||
5517 | return ret; | ||
5518 | } | ||
5519 | |||
5320 | static void | 5520 | static void |
5321 | tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) | 5521 | tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) |
5322 | { | 5522 | { |
5323 | struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu); | ||
5324 | struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); | 5523 | struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); |
5325 | struct dentry *d_cpu; | 5524 | struct dentry *d_cpu; |
5326 | char cpu_dir[30]; /* 30 characters should be more than enough */ | 5525 | char cpu_dir[30]; /* 30 characters should be more than enough */ |
@@ -5336,28 +5535,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) | |||
5336 | } | 5535 | } |
5337 | 5536 | ||
5338 | /* per cpu trace_pipe */ | 5537 | /* per cpu trace_pipe */ |
5339 | trace_create_file("trace_pipe", 0444, d_cpu, | 5538 | trace_create_cpu_file("trace_pipe", 0444, d_cpu, |
5340 | (void *)&data->trace_cpu, &tracing_pipe_fops); | 5539 | tr, cpu, &tracing_pipe_fops); |
5341 | 5540 | ||
5342 | /* per cpu trace */ | 5541 | /* per cpu trace */ |
5343 | trace_create_file("trace", 0644, d_cpu, | 5542 | trace_create_cpu_file("trace", 0644, d_cpu, |
5344 | (void *)&data->trace_cpu, &tracing_fops); | 5543 | tr, cpu, &tracing_fops); |
5345 | 5544 | ||
5346 | trace_create_file("trace_pipe_raw", 0444, d_cpu, | 5545 | trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, |
5347 | (void *)&data->trace_cpu, &tracing_buffers_fops); | 5546 | tr, cpu, &tracing_buffers_fops); |
5348 | 5547 | ||
5349 | trace_create_file("stats", 0444, d_cpu, | 5548 | trace_create_cpu_file("stats", 0444, d_cpu, |
5350 | (void *)&data->trace_cpu, &tracing_stats_fops); | 5549 | tr, cpu, &tracing_stats_fops); |
5351 | 5550 | ||
5352 | trace_create_file("buffer_size_kb", 0444, d_cpu, | 5551 | trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, |
5353 | (void *)&data->trace_cpu, &tracing_entries_fops); | 5552 | tr, cpu, &tracing_entries_fops); |
5354 | 5553 | ||
5355 | #ifdef CONFIG_TRACER_SNAPSHOT | 5554 | #ifdef CONFIG_TRACER_SNAPSHOT |
5356 | trace_create_file("snapshot", 0644, d_cpu, | 5555 | trace_create_cpu_file("snapshot", 0644, d_cpu, |
5357 | (void *)&data->trace_cpu, &snapshot_fops); | 5556 | tr, cpu, &snapshot_fops); |
5358 | 5557 | ||
5359 | trace_create_file("snapshot_raw", 0444, d_cpu, | 5558 | trace_create_cpu_file("snapshot_raw", 0444, d_cpu, |
5360 | (void *)&data->trace_cpu, &snapshot_raw_fops); | 5559 | tr, cpu, &snapshot_raw_fops); |
5361 | #endif | 5560 | #endif |
5362 | } | 5561 | } |
5363 | 5562 | ||
@@ -5612,15 +5811,10 @@ rb_simple_read(struct file *filp, char __user *ubuf, | |||
5612 | size_t cnt, loff_t *ppos) | 5811 | size_t cnt, loff_t *ppos) |
5613 | { | 5812 | { |
5614 | struct trace_array *tr = filp->private_data; | 5813 | struct trace_array *tr = filp->private_data; |
5615 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | ||
5616 | char buf[64]; | 5814 | char buf[64]; |
5617 | int r; | 5815 | int r; |
5618 | 5816 | ||
5619 | if (buffer) | 5817 | r = tracer_tracing_is_on(tr); |
5620 | r = ring_buffer_record_is_on(buffer); | ||
5621 | else | ||
5622 | r = 0; | ||
5623 | |||
5624 | r = sprintf(buf, "%d\n", r); | 5818 | r = sprintf(buf, "%d\n", r); |
5625 | 5819 | ||
5626 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 5820 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
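rb_simple_read() now asks a per-trace_array helper instead of poking the ring buffer directly, which is what lets per-instance tracing_on files share one implementation. tracer_tracing_is_on() itself is outside this hunk; judging from the open-coded check it replaces, it presumably looks roughly like the sketch below (illustrative only, not standalone code).

/* Illustrative sketch only -- not the verbatim kernel helper.  It mirrors
 * the open-coded check removed from rb_simple_read() above; the real
 * function may additionally consult tr->buffer_disabled. */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	return buffer ? ring_buffer_record_is_on(buffer) : 0;
}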
@@ -5642,11 +5836,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf, | |||
5642 | if (buffer) { | 5836 | if (buffer) { |
5643 | mutex_lock(&trace_types_lock); | 5837 | mutex_lock(&trace_types_lock); |
5644 | if (val) { | 5838 | if (val) { |
5645 | ring_buffer_record_on(buffer); | 5839 | tracer_tracing_on(tr); |
5646 | if (tr->current_trace->start) | 5840 | if (tr->current_trace->start) |
5647 | tr->current_trace->start(tr); | 5841 | tr->current_trace->start(tr); |
5648 | } else { | 5842 | } else { |
5649 | ring_buffer_record_off(buffer); | 5843 | tracer_tracing_off(tr); |
5650 | if (tr->current_trace->stop) | 5844 | if (tr->current_trace->stop) |
5651 | tr->current_trace->stop(tr); | 5845 | tr->current_trace->stop(tr); |
5652 | } | 5846 | } |
@@ -5659,9 +5853,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf, | |||
5659 | } | 5853 | } |
5660 | 5854 | ||
5661 | static const struct file_operations rb_simple_fops = { | 5855 | static const struct file_operations rb_simple_fops = { |
5662 | .open = tracing_open_generic, | 5856 | .open = tracing_open_generic_tr, |
5663 | .read = rb_simple_read, | 5857 | .read = rb_simple_read, |
5664 | .write = rb_simple_write, | 5858 | .write = rb_simple_write, |
5859 | .release = tracing_release_generic_tr, | ||
5665 | .llseek = default_llseek, | 5860 | .llseek = default_llseek, |
5666 | }; | 5861 | }; |
5667 | 5862 | ||
@@ -5670,17 +5865,6 @@ struct dentry *trace_instance_dir; | |||
5670 | static void | 5865 | static void |
5671 | init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer); | 5866 | init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer); |
5672 | 5867 | ||
5673 | static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf) | ||
5674 | { | ||
5675 | int cpu; | ||
5676 | |||
5677 | for_each_tracing_cpu(cpu) { | ||
5678 | memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu)); | ||
5679 | per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu; | ||
5680 | per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr; | ||
5681 | } | ||
5682 | } | ||
5683 | |||
5684 | static int | 5868 | static int |
5685 | allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) | 5869 | allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) |
5686 | { | 5870 | { |
@@ -5698,8 +5882,6 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size | |||
5698 | return -ENOMEM; | 5882 | return -ENOMEM; |
5699 | } | 5883 | } |
5700 | 5884 | ||
5701 | init_trace_buffers(tr, buf); | ||
5702 | |||
5703 | /* Allocate the first page for all buffers */ | 5885 | /* Allocate the first page for all buffers */ |
5704 | set_buffer_entries(&tr->trace_buffer, | 5886 | set_buffer_entries(&tr->trace_buffer, |
5705 | ring_buffer_size(tr->trace_buffer.buffer, 0)); | 5887 | ring_buffer_size(tr->trace_buffer.buffer, 0)); |
@@ -5766,17 +5948,15 @@ static int new_instance_create(const char *name) | |||
5766 | if (allocate_trace_buffers(tr, trace_buf_size) < 0) | 5948 | if (allocate_trace_buffers(tr, trace_buf_size) < 0) |
5767 | goto out_free_tr; | 5949 | goto out_free_tr; |
5768 | 5950 | ||
5769 | /* Holder for file callbacks */ | ||
5770 | tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS; | ||
5771 | tr->trace_cpu.tr = tr; | ||
5772 | |||
5773 | tr->dir = debugfs_create_dir(name, trace_instance_dir); | 5951 | tr->dir = debugfs_create_dir(name, trace_instance_dir); |
5774 | if (!tr->dir) | 5952 | if (!tr->dir) |
5775 | goto out_free_tr; | 5953 | goto out_free_tr; |
5776 | 5954 | ||
5777 | ret = event_trace_add_tracer(tr->dir, tr); | 5955 | ret = event_trace_add_tracer(tr->dir, tr); |
5778 | if (ret) | 5956 | if (ret) { |
5957 | debugfs_remove_recursive(tr->dir); | ||
5779 | goto out_free_tr; | 5958 | goto out_free_tr; |
5959 | } | ||
5780 | 5960 | ||
5781 | init_tracer_debugfs(tr, tr->dir); | 5961 | init_tracer_debugfs(tr, tr->dir); |
5782 | 5962 | ||
@@ -5922,18 +6102,18 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
5922 | tr, &tracing_iter_fops); | 6102 | tr, &tracing_iter_fops); |
5923 | 6103 | ||
5924 | trace_create_file("trace", 0644, d_tracer, | 6104 | trace_create_file("trace", 0644, d_tracer, |
5925 | (void *)&tr->trace_cpu, &tracing_fops); | 6105 | tr, &tracing_fops); |
5926 | 6106 | ||
5927 | trace_create_file("trace_pipe", 0444, d_tracer, | 6107 | trace_create_file("trace_pipe", 0444, d_tracer, |
5928 | (void *)&tr->trace_cpu, &tracing_pipe_fops); | 6108 | tr, &tracing_pipe_fops); |
5929 | 6109 | ||
5930 | trace_create_file("buffer_size_kb", 0644, d_tracer, | 6110 | trace_create_file("buffer_size_kb", 0644, d_tracer, |
5931 | (void *)&tr->trace_cpu, &tracing_entries_fops); | 6111 | tr, &tracing_entries_fops); |
5932 | 6112 | ||
5933 | trace_create_file("buffer_total_size_kb", 0444, d_tracer, | 6113 | trace_create_file("buffer_total_size_kb", 0444, d_tracer, |
5934 | tr, &tracing_total_entries_fops); | 6114 | tr, &tracing_total_entries_fops); |
5935 | 6115 | ||
5936 | trace_create_file("free_buffer", 0644, d_tracer, | 6116 | trace_create_file("free_buffer", 0200, d_tracer, |
5937 | tr, &tracing_free_buffer_fops); | 6117 | tr, &tracing_free_buffer_fops); |
5938 | 6118 | ||
5939 | trace_create_file("trace_marker", 0220, d_tracer, | 6119 | trace_create_file("trace_marker", 0220, d_tracer, |
@@ -5943,11 +6123,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
5943 | &trace_clock_fops); | 6123 | &trace_clock_fops); |
5944 | 6124 | ||
5945 | trace_create_file("tracing_on", 0644, d_tracer, | 6125 | trace_create_file("tracing_on", 0644, d_tracer, |
5946 | tr, &rb_simple_fops); | 6126 | tr, &rb_simple_fops); |
5947 | 6127 | ||
5948 | #ifdef CONFIG_TRACER_SNAPSHOT | 6128 | #ifdef CONFIG_TRACER_SNAPSHOT |
5949 | trace_create_file("snapshot", 0644, d_tracer, | 6129 | trace_create_file("snapshot", 0644, d_tracer, |
5950 | (void *)&tr->trace_cpu, &snapshot_fops); | 6130 | tr, &snapshot_fops); |
5951 | #endif | 6131 | #endif |
5952 | 6132 | ||
5953 | for_each_tracing_cpu(cpu) | 6133 | for_each_tracing_cpu(cpu) |
@@ -6241,10 +6421,6 @@ __init static int tracer_alloc_buffers(void) | |||
6241 | 6421 | ||
6242 | global_trace.flags = TRACE_ARRAY_FL_GLOBAL; | 6422 | global_trace.flags = TRACE_ARRAY_FL_GLOBAL; |
6243 | 6423 | ||
6244 | /* Holder for file callbacks */ | ||
6245 | global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS; | ||
6246 | global_trace.trace_cpu.tr = &global_trace; | ||
6247 | |||
6248 | INIT_LIST_HEAD(&global_trace.systems); | 6424 | INIT_LIST_HEAD(&global_trace.systems); |
6249 | INIT_LIST_HEAD(&global_trace.events); | 6425 | INIT_LIST_HEAD(&global_trace.events); |
6250 | list_add(&global_trace.list, &ftrace_trace_arrays); | 6426 | list_add(&global_trace.list, &ftrace_trace_arrays); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 20572ed88c5c..afaae41b0a02 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -130,19 +130,12 @@ enum trace_flag_type { | |||
130 | 130 | ||
131 | struct trace_array; | 131 | struct trace_array; |
132 | 132 | ||
133 | struct trace_cpu { | ||
134 | struct trace_array *tr; | ||
135 | struct dentry *dir; | ||
136 | int cpu; | ||
137 | }; | ||
138 | |||
139 | /* | 133 | /* |
140 | * The CPU trace array - it consists of thousands of trace entries | 134 | * The CPU trace array - it consists of thousands of trace entries |
141 | * plus some other descriptor data: (for example which task started | 135 | * plus some other descriptor data: (for example which task started |
142 | * the trace, etc.) | 136 | * the trace, etc.) |
143 | */ | 137 | */ |
144 | struct trace_array_cpu { | 138 | struct trace_array_cpu { |
145 | struct trace_cpu trace_cpu; | ||
146 | atomic_t disabled; | 139 | atomic_t disabled; |
147 | void *buffer_page; /* ring buffer spare */ | 140 | void *buffer_page; /* ring buffer spare */ |
148 | 141 | ||
@@ -196,7 +189,6 @@ struct trace_array { | |||
196 | bool allocated_snapshot; | 189 | bool allocated_snapshot; |
197 | #endif | 190 | #endif |
198 | int buffer_disabled; | 191 | int buffer_disabled; |
199 | struct trace_cpu trace_cpu; /* place holder */ | ||
200 | #ifdef CONFIG_FTRACE_SYSCALLS | 192 | #ifdef CONFIG_FTRACE_SYSCALLS |
201 | int sys_refcount_enter; | 193 | int sys_refcount_enter; |
202 | int sys_refcount_exit; | 194 | int sys_refcount_exit; |
@@ -214,7 +206,6 @@ struct trace_array { | |||
214 | struct dentry *event_dir; | 206 | struct dentry *event_dir; |
215 | struct list_head systems; | 207 | struct list_head systems; |
216 | struct list_head events; | 208 | struct list_head events; |
217 | struct task_struct *waiter; | ||
218 | int ref; | 209 | int ref; |
219 | }; | 210 | }; |
220 | 211 | ||
@@ -224,6 +215,11 @@ enum { | |||
224 | 215 | ||
225 | extern struct list_head ftrace_trace_arrays; | 216 | extern struct list_head ftrace_trace_arrays; |
226 | 217 | ||
218 | extern struct mutex trace_types_lock; | ||
219 | |||
220 | extern int trace_array_get(struct trace_array *tr); | ||
221 | extern void trace_array_put(struct trace_array *tr); | ||
222 | |||
227 | /* | 223 | /* |
228 | * The global tracer (top) should be the first trace array added, | 224 | * The global tracer (top) should be the first trace array added, |
229 | * but we check the flag anyway. | 225 | * but we check the flag anyway. |
@@ -554,11 +550,6 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu); | |||
554 | 550 | ||
555 | void poll_wait_pipe(struct trace_iterator *iter); | 551 | void poll_wait_pipe(struct trace_iterator *iter); |
556 | 552 | ||
557 | void ftrace(struct trace_array *tr, | ||
558 | struct trace_array_cpu *data, | ||
559 | unsigned long ip, | ||
560 | unsigned long parent_ip, | ||
561 | unsigned long flags, int pc); | ||
562 | void tracing_sched_switch_trace(struct trace_array *tr, | 553 | void tracing_sched_switch_trace(struct trace_array *tr, |
563 | struct task_struct *prev, | 554 | struct task_struct *prev, |
564 | struct task_struct *next, | 555 | struct task_struct *next, |
@@ -680,6 +671,15 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace, | |||
680 | struct trace_array *tr); | 671 | struct trace_array *tr); |
681 | extern int trace_selftest_startup_branch(struct tracer *trace, | 672 | extern int trace_selftest_startup_branch(struct tracer *trace, |
682 | struct trace_array *tr); | 673 | struct trace_array *tr); |
674 | /* | ||
675 | * Tracer data references selftest functions that only occur | ||
676 | * on boot up. These can be __init functions. Thus, when selftests | ||
677 | * are enabled, then the tracers need to reference __init functions. | ||
678 | */ | ||
679 | #define __tracer_data __refdata | ||
680 | #else | ||
681 | /* Tracers are seldom changed. Optimize when selftests are disabled. */ | ||
682 | #define __tracer_data __read_mostly | ||
683 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 683 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
684 | 684 | ||
685 | extern void *head_page(struct trace_array_cpu *data); | 685 | extern void *head_page(struct trace_array_cpu *data); |
@@ -774,6 +774,7 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags) | |||
774 | extern struct list_head ftrace_pids; | 774 | extern struct list_head ftrace_pids; |
775 | 775 | ||
776 | #ifdef CONFIG_FUNCTION_TRACER | 776 | #ifdef CONFIG_FUNCTION_TRACER |
777 | extern bool ftrace_filter_param __initdata; | ||
777 | static inline int ftrace_trace_task(struct task_struct *task) | 778 | static inline int ftrace_trace_task(struct task_struct *task) |
778 | { | 779 | { |
779 | if (list_empty(&ftrace_pids)) | 780 | if (list_empty(&ftrace_pids)) |
@@ -899,12 +900,6 @@ static inline void trace_branch_disable(void) | |||
899 | /* set ring buffers to default size if not already done so */ | 900 | /* set ring buffers to default size if not already done so */ |
900 | int tracing_update_buffers(void); | 901 | int tracing_update_buffers(void); |
901 | 902 | ||
902 | /* trace event type bit fields, not numeric */ | ||
903 | enum { | ||
904 | TRACE_EVENT_TYPE_PRINTF = 1, | ||
905 | TRACE_EVENT_TYPE_RAW = 2, | ||
906 | }; | ||
907 | |||
908 | struct ftrace_event_field { | 903 | struct ftrace_event_field { |
909 | struct list_head link; | 904 | struct list_head link; |
910 | const char *name; | 905 | const char *name; |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 84b1e045faba..80c36bcf66e8 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -236,6 +236,10 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, | |||
236 | 236 | ||
237 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); | 237 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); |
238 | 238 | ||
239 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
240 | "perf buffer not large enough")) | ||
241 | return NULL; | ||
242 | |||
239 | pc = preempt_count(); | 243 | pc = preempt_count(); |
240 | 244 | ||
241 | *rctxp = perf_swevent_get_recursion_context(); | 245 | *rctxp = perf_swevent_get_recursion_context(); |
@@ -266,6 +270,10 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, | |||
266 | struct pt_regs regs; | 270 | struct pt_regs regs; |
267 | int rctx; | 271 | int rctx; |
268 | 272 | ||
273 | head = this_cpu_ptr(event_function.perf_events); | ||
274 | if (hlist_empty(head)) | ||
275 | return; | ||
276 | |||
269 | #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \ | 277 | #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \ |
270 | sizeof(u64)) - sizeof(u32)) | 278 | sizeof(u64)) - sizeof(u32)) |
271 | 279 | ||
@@ -279,8 +287,6 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, | |||
279 | 287 | ||
280 | entry->ip = ip; | 288 | entry->ip = ip; |
281 | entry->parent_ip = parent_ip; | 289 | entry->parent_ip = parent_ip; |
282 | |||
283 | head = this_cpu_ptr(event_function.perf_events); | ||
284 | perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, | 290 | perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, |
285 | 1, &regs, head, NULL); | 291 | 1, &regs, head, NULL); |
286 | 292 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 27963e2bf4bf..29a7ebcfb426 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -41,6 +41,23 @@ static LIST_HEAD(ftrace_common_fields); | |||
41 | static struct kmem_cache *field_cachep; | 41 | static struct kmem_cache *field_cachep; |
42 | static struct kmem_cache *file_cachep; | 42 | static struct kmem_cache *file_cachep; |
43 | 43 | ||
44 | #define SYSTEM_FL_FREE_NAME (1 << 31) | ||
45 | |||
46 | static inline int system_refcount(struct event_subsystem *system) | ||
47 | { | ||
48 | return system->ref_count & ~SYSTEM_FL_FREE_NAME; | ||
49 | } | ||
50 | |||
51 | static int system_refcount_inc(struct event_subsystem *system) | ||
52 | { | ||
53 | return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME; | ||
54 | } | ||
55 | |||
56 | static int system_refcount_dec(struct event_subsystem *system) | ||
57 | { | ||
58 | return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME; | ||
59 | } | ||
60 | |||
44 | /* Double loops, do not use break, only goto's work */ | 61 | /* Double loops, do not use break, only goto's work */ |
45 | #define do_for_each_event_file(tr, file) \ | 62 | #define do_for_each_event_file(tr, file) \ |
46 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ | 63 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ |
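The new helpers pack a bookkeeping flag into the top bit of the subsystem reference count: SYSTEM_FL_FREE_NAME records whether ->name was dynamically allocated and must be kfree()d, while the low bits keep counting references. A self-contained userspace demonstration of the same bit-packing, with illustrative names:

#include <stdio.h>

#define FL_FREE_NAME (1u << 31)       /* flag bit sharing the refcount word */

struct subsys {
	unsigned int ref_count;       /* FL_FREE_NAME | actual count */
};

static unsigned int refcount(struct subsys *s)
{
	return s->ref_count & ~FL_FREE_NAME;        /* count without the flag */
}

static unsigned int refcount_inc(struct subsys *s)
{
	return (s->ref_count++) & ~FL_FREE_NAME;    /* old count, flag kept */
}

static unsigned int refcount_dec(struct subsys *s)
{
	return (--s->ref_count) & ~FL_FREE_NAME;    /* new count, flag kept */
}

int main(void)
{
	/* one reference; the name was strdup()'d and must be freed at the end */
	struct subsys s = { .ref_count = 1 | FL_FREE_NAME };

	refcount_inc(&s);
	printf("count=%u, free name later=%s\n",
	       refcount(&s), (s.ref_count & FL_FREE_NAME) ? "yes" : "no");

	refcount_dec(&s);
	if (refcount_dec(&s) == 0)
		printf("last reference dropped, %s\n",
		       (s.ref_count & FL_FREE_NAME) ?
		       "free the name too" : "name is static");
	return 0;
}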
@@ -97,7 +114,7 @@ static int __trace_define_field(struct list_head *head, const char *type, | |||
97 | 114 | ||
98 | field = kmem_cache_alloc(field_cachep, GFP_TRACE); | 115 | field = kmem_cache_alloc(field_cachep, GFP_TRACE); |
99 | if (!field) | 116 | if (!field) |
100 | goto err; | 117 | return -ENOMEM; |
101 | 118 | ||
102 | field->name = name; | 119 | field->name = name; |
103 | field->type = type; | 120 | field->type = type; |
@@ -114,11 +131,6 @@ static int __trace_define_field(struct list_head *head, const char *type, | |||
114 | list_add(&field->link, head); | 131 | list_add(&field->link, head); |
115 | 132 | ||
116 | return 0; | 133 | return 0; |
117 | |||
118 | err: | ||
119 | kmem_cache_free(field_cachep, field); | ||
120 | |||
121 | return -ENOMEM; | ||
122 | } | 134 | } |
123 | 135 | ||
124 | int trace_define_field(struct ftrace_event_call *call, const char *type, | 136 | int trace_define_field(struct ftrace_event_call *call, const char *type, |
@@ -279,9 +291,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, | |||
279 | } | 291 | } |
280 | call->class->reg(call, TRACE_REG_UNREGISTER, file); | 292 | call->class->reg(call, TRACE_REG_UNREGISTER, file); |
281 | } | 293 | } |
282 | /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */ | 294 | /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ |
283 | if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) | 295 | if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) |
284 | set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); | 296 | set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); |
297 | else | ||
298 | clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); | ||
285 | break; | 299 | break; |
286 | case 1: | 300 | case 1: |
287 | /* | 301 | /* |
@@ -349,8 +363,8 @@ static void __put_system(struct event_subsystem *system) | |||
349 | { | 363 | { |
350 | struct event_filter *filter = system->filter; | 364 | struct event_filter *filter = system->filter; |
351 | 365 | ||
352 | WARN_ON_ONCE(system->ref_count == 0); | 366 | WARN_ON_ONCE(system_refcount(system) == 0); |
353 | if (--system->ref_count) | 367 | if (system_refcount_dec(system)) |
354 | return; | 368 | return; |
355 | 369 | ||
356 | list_del(&system->list); | 370 | list_del(&system->list); |
@@ -359,13 +373,15 @@ static void __put_system(struct event_subsystem *system) | |||
359 | kfree(filter->filter_string); | 373 | kfree(filter->filter_string); |
360 | kfree(filter); | 374 | kfree(filter); |
361 | } | 375 | } |
376 | if (system->ref_count & SYSTEM_FL_FREE_NAME) | ||
377 | kfree(system->name); | ||
362 | kfree(system); | 378 | kfree(system); |
363 | } | 379 | } |
364 | 380 | ||
365 | static void __get_system(struct event_subsystem *system) | 381 | static void __get_system(struct event_subsystem *system) |
366 | { | 382 | { |
367 | WARN_ON_ONCE(system->ref_count == 0); | 383 | WARN_ON_ONCE(system_refcount(system) == 0); |
368 | system->ref_count++; | 384 | system_refcount_inc(system); |
369 | } | 385 | } |
370 | 386 | ||
371 | static void __get_system_dir(struct ftrace_subsystem_dir *dir) | 387 | static void __get_system_dir(struct ftrace_subsystem_dir *dir) |
@@ -379,7 +395,7 @@ static void __put_system_dir(struct ftrace_subsystem_dir *dir) | |||
379 | { | 395 | { |
380 | WARN_ON_ONCE(dir->ref_count == 0); | 396 | WARN_ON_ONCE(dir->ref_count == 0); |
381 | /* If the subsystem is about to be freed, the dir must be too */ | 397 | /* If the subsystem is about to be freed, the dir must be too */ |
382 | WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1); | 398 | WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1); |
383 | 399 | ||
384 | __put_system(dir->subsystem); | 400 | __put_system(dir->subsystem); |
385 | if (!--dir->ref_count) | 401 | if (!--dir->ref_count) |
@@ -393,17 +409,55 @@ static void put_system(struct ftrace_subsystem_dir *dir) | |||
393 | mutex_unlock(&event_mutex); | 409 | mutex_unlock(&event_mutex); |
394 | } | 410 | } |
395 | 411 | ||
412 | static void remove_subsystem(struct ftrace_subsystem_dir *dir) | ||
413 | { | ||
414 | if (!dir) | ||
415 | return; | ||
416 | |||
417 | if (!--dir->nr_events) { | ||
418 | debugfs_remove_recursive(dir->entry); | ||
419 | list_del(&dir->list); | ||
420 | __put_system_dir(dir); | ||
421 | } | ||
422 | } | ||
423 | |||
424 | static void *event_file_data(struct file *filp) | ||
425 | { | ||
426 | return ACCESS_ONCE(file_inode(filp)->i_private); | ||
427 | } | ||
428 | |||
429 | static void remove_event_file_dir(struct ftrace_event_file *file) | ||
430 | { | ||
431 | struct dentry *dir = file->dir; | ||
432 | struct dentry *child; | ||
433 | |||
434 | if (dir) { | ||
435 | spin_lock(&dir->d_lock); /* probably unneeded */ | ||
436 | list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) { | ||
437 | if (child->d_inode) /* probably unneeded */ | ||
438 | child->d_inode->i_private = NULL; | ||
439 | } | ||
440 | spin_unlock(&dir->d_lock); | ||
441 | |||
442 | debugfs_remove_recursive(dir); | ||
443 | } | ||
444 | |||
445 | list_del(&file->list); | ||
446 | remove_subsystem(file->system); | ||
447 | kmem_cache_free(file_cachep, file); | ||
448 | } | ||
449 | |||
396 | /* | 450 | /* |
397 | * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. | 451 | * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. |
398 | */ | 452 | */ |
399 | static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, | 453 | static int |
400 | const char *sub, const char *event, int set) | 454 | __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, |
455 | const char *sub, const char *event, int set) | ||
401 | { | 456 | { |
402 | struct ftrace_event_file *file; | 457 | struct ftrace_event_file *file; |
403 | struct ftrace_event_call *call; | 458 | struct ftrace_event_call *call; |
404 | int ret = -EINVAL; | 459 | int ret = -EINVAL; |
405 | 460 | ||
406 | mutex_lock(&event_mutex); | ||
407 | list_for_each_entry(file, &tr->events, list) { | 461 | list_for_each_entry(file, &tr->events, list) { |
408 | 462 | ||
409 | call = file->event_call; | 463 | call = file->event_call; |
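remove_event_file_dir() clears i_private on each child dentry before removing the directory, and readers go back through event_file_data() under event_mutex, failing with -ENODEV once the event is gone (see event_enable_read()/event_enable_write() further down). Below is a compileable userspace analogue of that clear-under-lock / re-check-under-lock handshake, with a pthread mutex standing in for event_mutex and made-up names throughout.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;

struct event { const char *name; };

static struct event *i_private;          /* what event_file_data() would read */

static int event_enable_read(char *buf, size_t len)
{
	struct event *ev;
	int ret = -ENODEV;

	pthread_mutex_lock(&event_mutex);
	ev = i_private;                  /* re-check under the lock before use */
	if (ev)
		ret = snprintf(buf, len, "%s: 1\n", ev->name);
	pthread_mutex_unlock(&event_mutex);

	return ret;
}

static void remove_event_file(void)
{
	pthread_mutex_lock(&event_mutex);
	i_private = NULL;                /* as remove_event_file_dir() does */
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	static struct event ev = { "sample_event" };
	char buf[64];

	i_private = &ev;
	printf("open event:    ret=%d\n", event_enable_read(buf, sizeof(buf)));
	remove_event_file();
	printf("removed event: ret=%d\n", event_enable_read(buf, sizeof(buf)));
	return 0;
}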
@@ -429,6 +483,17 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, | |||
429 | 483 | ||
430 | ret = 0; | 484 | ret = 0; |
431 | } | 485 | } |
486 | |||
487 | return ret; | ||
488 | } | ||
489 | |||
490 | static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, | ||
491 | const char *sub, const char *event, int set) | ||
492 | { | ||
493 | int ret; | ||
494 | |||
495 | mutex_lock(&event_mutex); | ||
496 | ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set); | ||
432 | mutex_unlock(&event_mutex); | 497 | mutex_unlock(&event_mutex); |
433 | 498 | ||
434 | return ret; | 499 | return ret; |
@@ -623,18 +688,28 @@ static ssize_t | |||
623 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | 688 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, |
624 | loff_t *ppos) | 689 | loff_t *ppos) |
625 | { | 690 | { |
626 | struct ftrace_event_file *file = filp->private_data; | 691 | struct ftrace_event_file *file; |
627 | char *buf; | 692 | unsigned long flags; |
693 | char buf[4] = "0"; | ||
628 | 694 | ||
629 | if (file->flags & FTRACE_EVENT_FL_ENABLED) { | 695 | mutex_lock(&event_mutex); |
630 | if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED) | 696 | file = event_file_data(filp); |
631 | buf = "0*\n"; | 697 | if (likely(file)) |
632 | else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) | 698 | flags = file->flags; |
633 | buf = "1*\n"; | 699 | mutex_unlock(&event_mutex); |
634 | else | 700 | |
635 | buf = "1\n"; | 701 | if (!file) |
636 | } else | 702 | return -ENODEV; |
637 | buf = "0\n"; | 703 | |
704 | if (flags & FTRACE_EVENT_FL_ENABLED && | ||
705 | !(flags & FTRACE_EVENT_FL_SOFT_DISABLED)) | ||
706 | strcpy(buf, "1"); | ||
707 | |||
708 | if (flags & FTRACE_EVENT_FL_SOFT_DISABLED || | ||
709 | flags & FTRACE_EVENT_FL_SOFT_MODE) | ||
710 | strcat(buf, "*"); | ||
711 | |||
712 | strcat(buf, "\n"); | ||
638 | 713 | ||
639 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf)); | 714 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf)); |
640 | } | 715 | } |
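The rewritten event_enable_read() composes at most four bytes -- "0" or "1", an optional "*" for soft state, and a newline -- from a flags snapshot taken under event_mutex. A small standalone rendering of the same mapping; the FL_* bit values below are illustrative, not the kernel's FTRACE_EVENT_FL_* definitions.

#include <stdio.h>
#include <string.h>

#define FL_ENABLED        (1UL << 0)
#define FL_SOFT_MODE      (1UL << 1)
#define FL_SOFT_DISABLED  (1UL << 2)

/* Mirrors the buf[4] composition in event_enable_read(): "1" only when the
 * event is enabled and not soft-disabled, "*" whenever soft state is involved. */
static void format_enable(unsigned long flags, char buf[4])
{
	strcpy(buf, "0");
	if ((flags & FL_ENABLED) && !(flags & FL_SOFT_DISABLED))
		strcpy(buf, "1");
	if (flags & (FL_SOFT_DISABLED | FL_SOFT_MODE))
		strcat(buf, "*");
	strcat(buf, "\n");
}

int main(void)
{
	char buf[4];

	format_enable(0, buf);
	printf("disabled:       %s", buf);
	format_enable(FL_ENABLED, buf);
	printf("enabled:        %s", buf);
	format_enable(FL_ENABLED | FL_SOFT_MODE, buf);
	printf("soft mode, on:  %s", buf);
	format_enable(FL_ENABLED | FL_SOFT_MODE | FL_SOFT_DISABLED, buf);
	printf("soft-disabled:  %s", buf);
	return 0;
}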
@@ -643,13 +718,10 @@ static ssize_t | |||
643 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | 718 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, |
644 | loff_t *ppos) | 719 | loff_t *ppos) |
645 | { | 720 | { |
646 | struct ftrace_event_file *file = filp->private_data; | 721 | struct ftrace_event_file *file; |
647 | unsigned long val; | 722 | unsigned long val; |
648 | int ret; | 723 | int ret; |
649 | 724 | ||
650 | if (!file) | ||
651 | return -EINVAL; | ||
652 | |||
653 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 725 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
654 | if (ret) | 726 | if (ret) |
655 | return ret; | 727 | return ret; |
@@ -661,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
661 | switch (val) { | 733 | switch (val) { |
662 | case 0: | 734 | case 0: |
663 | case 1: | 735 | case 1: |
736 | ret = -ENODEV; | ||
664 | mutex_lock(&event_mutex); | 737 | mutex_lock(&event_mutex); |
665 | ret = ftrace_event_enable_disable(file, val); | 738 | file = event_file_data(filp); |
739 | if (likely(file)) | ||
740 | ret = ftrace_event_enable_disable(file, val); | ||
666 | mutex_unlock(&event_mutex); | 741 | mutex_unlock(&event_mutex); |
667 | break; | 742 | break; |
668 | 743 | ||
@@ -769,65 +844,39 @@ enum { | |||
769 | 844 | ||
770 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) | 845 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) |
771 | { | 846 | { |
772 | struct ftrace_event_call *call = m->private; | 847 | struct ftrace_event_call *call = event_file_data(m->private); |
773 | struct ftrace_event_field *field; | ||
774 | struct list_head *common_head = &ftrace_common_fields; | 848 | struct list_head *common_head = &ftrace_common_fields; |
775 | struct list_head *head = trace_get_fields(call); | 849 | struct list_head *head = trace_get_fields(call); |
850 | struct list_head *node = v; | ||
776 | 851 | ||
777 | (*pos)++; | 852 | (*pos)++; |
778 | 853 | ||
779 | switch ((unsigned long)v) { | 854 | switch ((unsigned long)v) { |
780 | case FORMAT_HEADER: | 855 | case FORMAT_HEADER: |
781 | if (unlikely(list_empty(common_head))) | 856 | node = common_head; |
782 | return NULL; | 857 | break; |
783 | |||
784 | field = list_entry(common_head->prev, | ||
785 | struct ftrace_event_field, link); | ||
786 | return field; | ||
787 | 858 | ||
788 | case FORMAT_FIELD_SEPERATOR: | 859 | case FORMAT_FIELD_SEPERATOR: |
789 | if (unlikely(list_empty(head))) | 860 | node = head; |
790 | return NULL; | 861 | break; |
791 | |||
792 | field = list_entry(head->prev, struct ftrace_event_field, link); | ||
793 | return field; | ||
794 | 862 | ||
795 | case FORMAT_PRINTFMT: | 863 | case FORMAT_PRINTFMT: |
796 | /* all done */ | 864 | /* all done */ |
797 | return NULL; | 865 | return NULL; |
798 | } | 866 | } |
799 | 867 | ||
800 | field = v; | 868 | node = node->prev; |
801 | if (field->link.prev == common_head) | 869 | if (node == common_head) |
802 | return (void *)FORMAT_FIELD_SEPERATOR; | 870 | return (void *)FORMAT_FIELD_SEPERATOR; |
803 | else if (field->link.prev == head) | 871 | else if (node == head) |
804 | return (void *)FORMAT_PRINTFMT; | 872 | return (void *)FORMAT_PRINTFMT; |
805 | 873 | else | |
806 | field = list_entry(field->link.prev, struct ftrace_event_field, link); | 874 | return node; |
807 | |||
808 | return field; | ||
809 | } | ||
810 | |||
811 | static void *f_start(struct seq_file *m, loff_t *pos) | ||
812 | { | ||
813 | loff_t l = 0; | ||
814 | void *p; | ||
815 | |||
816 | /* Start by showing the header */ | ||
817 | if (!*pos) | ||
818 | return (void *)FORMAT_HEADER; | ||
819 | |||
820 | p = (void *)FORMAT_HEADER; | ||
821 | do { | ||
822 | p = f_next(m, p, &l); | ||
823 | } while (p && l < *pos); | ||
824 | |||
825 | return p; | ||
826 | } | 875 | } |
827 | 876 | ||
828 | static int f_show(struct seq_file *m, void *v) | 877 | static int f_show(struct seq_file *m, void *v) |
829 | { | 878 | { |
830 | struct ftrace_event_call *call = m->private; | 879 | struct ftrace_event_call *call = event_file_data(m->private); |
831 | struct ftrace_event_field *field; | 880 | struct ftrace_event_field *field; |
832 | const char *array_descriptor; | 881 | const char *array_descriptor; |
833 | 882 | ||
@@ -848,8 +897,7 @@ static int f_show(struct seq_file *m, void *v) | |||
848 | return 0; | 897 | return 0; |
849 | } | 898 | } |
850 | 899 | ||
851 | field = v; | 900 | field = list_entry(v, struct ftrace_event_field, link); |
852 | |||
853 | /* | 901 | /* |
854 | * Smartly shows the array type(except dynamic array). | 902 | * Smartly shows the array type(except dynamic array). |
855 | * Normal: | 903 | * Normal: |
@@ -876,8 +924,25 @@ static int f_show(struct seq_file *m, void *v) | |||
876 | return 0; | 924 | return 0; |
877 | } | 925 | } |
878 | 926 | ||
927 | static void *f_start(struct seq_file *m, loff_t *pos) | ||
928 | { | ||
929 | void *p = (void *)FORMAT_HEADER; | ||
930 | loff_t l = 0; | ||
931 | |||
932 | /* ->stop() is called even if ->start() fails */ | ||
933 | mutex_lock(&event_mutex); | ||
934 | if (!event_file_data(m->private)) | ||
935 | return ERR_PTR(-ENODEV); | ||
936 | |||
937 | while (l < *pos && p) | ||
938 | p = f_next(m, p, &l); | ||
939 | |||
940 | return p; | ||
941 | } | ||
942 | |||
879 | static void f_stop(struct seq_file *m, void *p) | 943 | static void f_stop(struct seq_file *m, void *p) |
880 | { | 944 | { |
945 | mutex_unlock(&event_mutex); | ||
881 | } | 946 | } |
882 | 947 | ||
883 | static const struct seq_operations trace_format_seq_ops = { | 948 | static const struct seq_operations trace_format_seq_ops = { |
@@ -889,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = { | |||
889 | 954 | ||
890 | static int trace_format_open(struct inode *inode, struct file *file) | 955 | static int trace_format_open(struct inode *inode, struct file *file) |
891 | { | 956 | { |
892 | struct ftrace_event_call *call = inode->i_private; | ||
893 | struct seq_file *m; | 957 | struct seq_file *m; |
894 | int ret; | 958 | int ret; |
895 | 959 | ||
@@ -898,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file) | |||
898 | return ret; | 962 | return ret; |
899 | 963 | ||
900 | m = file->private_data; | 964 | m = file->private_data; |
901 | m->private = call; | 965 | m->private = file; |
902 | 966 | ||
903 | return 0; | 967 | return 0; |
904 | } | 968 | } |
@@ -906,45 +970,47 @@ static int trace_format_open(struct inode *inode, struct file *file) | |||
906 | static ssize_t | 970 | static ssize_t |
907 | event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) | 971 | event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) |
908 | { | 972 | { |
909 | struct ftrace_event_call *call = filp->private_data; | 973 | int id = (long)event_file_data(filp); |
910 | struct trace_seq *s; | 974 | char buf[32]; |
911 | int r; | 975 | int len; |
912 | 976 | ||
913 | if (*ppos) | 977 | if (*ppos) |
914 | return 0; | 978 | return 0; |
915 | 979 | ||
916 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 980 | if (unlikely(!id)) |
917 | if (!s) | 981 | return -ENODEV; |
918 | return -ENOMEM; | ||
919 | 982 | ||
920 | trace_seq_init(s); | 983 | len = sprintf(buf, "%d\n", id); |
921 | trace_seq_printf(s, "%d\n", call->event.type); | ||
922 | 984 | ||
923 | r = simple_read_from_buffer(ubuf, cnt, ppos, | 985 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); |
924 | s->buffer, s->len); | ||
925 | kfree(s); | ||
926 | return r; | ||
927 | } | 986 | } |
928 | 987 | ||
929 | static ssize_t | 988 | static ssize_t |
930 | event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, | 989 | event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, |
931 | loff_t *ppos) | 990 | loff_t *ppos) |
932 | { | 991 | { |
933 | struct ftrace_event_call *call = filp->private_data; | 992 | struct ftrace_event_call *call; |
934 | struct trace_seq *s; | 993 | struct trace_seq *s; |
935 | int r; | 994 | int r = -ENODEV; |
936 | 995 | ||
937 | if (*ppos) | 996 | if (*ppos) |
938 | return 0; | 997 | return 0; |
939 | 998 | ||
940 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 999 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
1000 | |||
941 | if (!s) | 1001 | if (!s) |
942 | return -ENOMEM; | 1002 | return -ENOMEM; |
943 | 1003 | ||
944 | trace_seq_init(s); | 1004 | trace_seq_init(s); |
945 | 1005 | ||
946 | print_event_filter(call, s); | 1006 | mutex_lock(&event_mutex); |
947 | r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); | 1007 | call = event_file_data(filp); |
1008 | if (call) | ||
1009 | print_event_filter(call, s); | ||
1010 | mutex_unlock(&event_mutex); | ||
1011 | |||
1012 | if (call) | ||
1013 | r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); | ||
948 | 1014 | ||
949 | kfree(s); | 1015 | kfree(s); |
950 | 1016 | ||
@@ -955,9 +1021,9 @@ static ssize_t | |||
955 | event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | 1021 | event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, |
956 | loff_t *ppos) | 1022 | loff_t *ppos) |
957 | { | 1023 | { |
958 | struct ftrace_event_call *call = filp->private_data; | 1024 | struct ftrace_event_call *call; |
959 | char *buf; | 1025 | char *buf; |
960 | int err; | 1026 | int err = -ENODEV; |
961 | 1027 | ||
962 | if (cnt >= PAGE_SIZE) | 1028 | if (cnt >= PAGE_SIZE) |
963 | return -EINVAL; | 1029 | return -EINVAL; |
@@ -972,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
972 | } | 1038 | } |
973 | buf[cnt] = '\0'; | 1039 | buf[cnt] = '\0'; |
974 | 1040 | ||
975 | err = apply_event_filter(call, buf); | 1041 | mutex_lock(&event_mutex); |
1042 | call = event_file_data(filp); | ||
1043 | if (call) | ||
1044 | err = apply_event_filter(call, buf); | ||
1045 | mutex_unlock(&event_mutex); | ||
1046 | |||
976 | free_page((unsigned long) buf); | 1047 | free_page((unsigned long) buf); |
977 | if (err < 0) | 1048 | if (err < 0) |
978 | return err; | 1049 | return err; |
@@ -992,6 +1063,7 @@ static int subsystem_open(struct inode *inode, struct file *filp) | |||
992 | int ret; | 1063 | int ret; |
993 | 1064 | ||
994 | /* Make sure the system still exists */ | 1065 | /* Make sure the system still exists */ |
1066 | mutex_lock(&trace_types_lock); | ||
995 | mutex_lock(&event_mutex); | 1067 | mutex_lock(&event_mutex); |
996 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | 1068 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
997 | list_for_each_entry(dir, &tr->systems, list) { | 1069 | list_for_each_entry(dir, &tr->systems, list) { |
@@ -1007,6 +1079,7 @@ static int subsystem_open(struct inode *inode, struct file *filp) | |||
1007 | } | 1079 | } |
1008 | exit_loop: | 1080 | exit_loop: |
1009 | mutex_unlock(&event_mutex); | 1081 | mutex_unlock(&event_mutex); |
1082 | mutex_unlock(&trace_types_lock); | ||
1010 | 1083 | ||
1011 | if (!system) | 1084 | if (!system) |
1012 | return -ENODEV; | 1085 | return -ENODEV; |
@@ -1014,9 +1087,17 @@ static int subsystem_open(struct inode *inode, struct file *filp) | |||
1014 | /* Some versions of gcc think dir can be uninitialized here */ | 1087 | /* Some versions of gcc think dir can be uninitialized here */ |
1015 | WARN_ON(!dir); | 1088 | WARN_ON(!dir); |
1016 | 1089 | ||
1090 | /* Still need to increment the ref count of the system */ | ||
1091 | if (trace_array_get(tr) < 0) { | ||
1092 | put_system(dir); | ||
1093 | return -ENODEV; | ||
1094 | } | ||
1095 | |||
1017 | ret = tracing_open_generic(inode, filp); | 1096 | ret = tracing_open_generic(inode, filp); |
1018 | if (ret < 0) | 1097 | if (ret < 0) { |
1098 | trace_array_put(tr); | ||
1019 | put_system(dir); | 1099 | put_system(dir); |
1100 | } | ||
1020 | 1101 | ||
1021 | return ret; | 1102 | return ret; |
1022 | } | 1103 | } |
@@ -1027,16 +1108,23 @@ static int system_tr_open(struct inode *inode, struct file *filp) | |||
1027 | struct trace_array *tr = inode->i_private; | 1108 | struct trace_array *tr = inode->i_private; |
1028 | int ret; | 1109 | int ret; |
1029 | 1110 | ||
1111 | if (trace_array_get(tr) < 0) | ||
1112 | return -ENODEV; | ||
1113 | |||
1030 | /* Make a temporary dir that has no system but points to tr */ | 1114 | /* Make a temporary dir that has no system but points to tr */ |
1031 | dir = kzalloc(sizeof(*dir), GFP_KERNEL); | 1115 | dir = kzalloc(sizeof(*dir), GFP_KERNEL); |
1032 | if (!dir) | 1116 | if (!dir) { |
1117 | trace_array_put(tr); | ||
1033 | return -ENOMEM; | 1118 | return -ENOMEM; |
1119 | } | ||
1034 | 1120 | ||
1035 | dir->tr = tr; | 1121 | dir->tr = tr; |
1036 | 1122 | ||
1037 | ret = tracing_open_generic(inode, filp); | 1123 | ret = tracing_open_generic(inode, filp); |
1038 | if (ret < 0) | 1124 | if (ret < 0) { |
1125 | trace_array_put(tr); | ||
1039 | kfree(dir); | 1126 | kfree(dir); |
1127 | } | ||
1040 | 1128 | ||
1041 | filp->private_data = dir; | 1129 | filp->private_data = dir; |
1042 | 1130 | ||
@@ -1047,6 +1135,8 @@ static int subsystem_release(struct inode *inode, struct file *file) | |||
1047 | { | 1135 | { |
1048 | struct ftrace_subsystem_dir *dir = file->private_data; | 1136 | struct ftrace_subsystem_dir *dir = file->private_data; |
1049 | 1137 | ||
1138 | trace_array_put(dir->tr); | ||
1139 | |||
1050 | /* | 1140 | /* |
1051 | * If dir->subsystem is NULL, then this is a temporary | 1141 | * If dir->subsystem is NULL, then this is a temporary |
1052 | * descriptor that was made for a trace_array to enable | 1142 | * descriptor that was made for a trace_array to enable |
@@ -1143,6 +1233,7 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) | |||
1143 | 1233 | ||
1144 | static int ftrace_event_avail_open(struct inode *inode, struct file *file); | 1234 | static int ftrace_event_avail_open(struct inode *inode, struct file *file); |
1145 | static int ftrace_event_set_open(struct inode *inode, struct file *file); | 1235 | static int ftrace_event_set_open(struct inode *inode, struct file *file); |
1236 | static int ftrace_event_release(struct inode *inode, struct file *file); | ||
1146 | 1237 | ||
1147 | static const struct seq_operations show_event_seq_ops = { | 1238 | static const struct seq_operations show_event_seq_ops = { |
1148 | .start = t_start, | 1239 | .start = t_start, |
@@ -1170,7 +1261,7 @@ static const struct file_operations ftrace_set_event_fops = { | |||
1170 | .read = seq_read, | 1261 | .read = seq_read, |
1171 | .write = ftrace_event_write, | 1262 | .write = ftrace_event_write, |
1172 | .llseek = seq_lseek, | 1263 | .llseek = seq_lseek, |
1173 | .release = seq_release, | 1264 | .release = ftrace_event_release, |
1174 | }; | 1265 | }; |
1175 | 1266 | ||
1176 | static const struct file_operations ftrace_enable_fops = { | 1267 | static const struct file_operations ftrace_enable_fops = { |
@@ -1188,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = { | |||
1188 | }; | 1279 | }; |
1189 | 1280 | ||
1190 | static const struct file_operations ftrace_event_id_fops = { | 1281 | static const struct file_operations ftrace_event_id_fops = { |
1191 | .open = tracing_open_generic, | ||
1192 | .read = event_id_read, | 1282 | .read = event_id_read, |
1193 | .llseek = default_llseek, | 1283 | .llseek = default_llseek, |
1194 | }; | 1284 | }; |
@@ -1247,6 +1337,15 @@ ftrace_event_open(struct inode *inode, struct file *file, | |||
1247 | return ret; | 1337 | return ret; |
1248 | } | 1338 | } |
1249 | 1339 | ||
1340 | static int ftrace_event_release(struct inode *inode, struct file *file) | ||
1341 | { | ||
1342 | struct trace_array *tr = inode->i_private; | ||
1343 | |||
1344 | trace_array_put(tr); | ||
1345 | |||
1346 | return seq_release(inode, file); | ||
1347 | } | ||
1348 | |||
1250 | static int | 1349 | static int |
1251 | ftrace_event_avail_open(struct inode *inode, struct file *file) | 1350 | ftrace_event_avail_open(struct inode *inode, struct file *file) |
1252 | { | 1351 | { |
@@ -1260,12 +1359,19 @@ ftrace_event_set_open(struct inode *inode, struct file *file) | |||
1260 | { | 1359 | { |
1261 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; | 1360 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; |
1262 | struct trace_array *tr = inode->i_private; | 1361 | struct trace_array *tr = inode->i_private; |
1362 | int ret; | ||
1363 | |||
1364 | if (trace_array_get(tr) < 0) | ||
1365 | return -ENODEV; | ||
1263 | 1366 | ||
1264 | if ((file->f_mode & FMODE_WRITE) && | 1367 | if ((file->f_mode & FMODE_WRITE) && |
1265 | (file->f_flags & O_TRUNC)) | 1368 | (file->f_flags & O_TRUNC)) |
1266 | ftrace_clear_events(tr); | 1369 | ftrace_clear_events(tr); |
1267 | 1370 | ||
1268 | return ftrace_event_open(inode, file, seq_ops); | 1371 | ret = ftrace_event_open(inode, file, seq_ops); |
1372 | if (ret < 0) | ||
1373 | trace_array_put(tr); | ||
1374 | return ret; | ||
1269 | } | 1375 | } |
1270 | 1376 | ||
1271 | static struct event_subsystem * | 1377 | static struct event_subsystem * |
@@ -1279,7 +1385,15 @@ create_new_subsystem(const char *name) | |||
1279 | return NULL; | 1385 | return NULL; |
1280 | 1386 | ||
1281 | system->ref_count = 1; | 1387 | system->ref_count = 1; |
1282 | system->name = name; | 1388 | |
1389 | /* Only allocate if dynamic (kprobes and modules) */ | ||
1390 | if (!core_kernel_data((unsigned long)name)) { | ||
1391 | system->ref_count |= SYSTEM_FL_FREE_NAME; | ||
1392 | system->name = kstrdup(name, GFP_KERNEL); | ||
1393 | if (!system->name) | ||
1394 | goto out_free; | ||
1395 | } else | ||
1396 | system->name = name; | ||
1283 | 1397 | ||
1284 | system->filter = NULL; | 1398 | system->filter = NULL; |
1285 | 1399 | ||
@@ -1292,6 +1406,8 @@ create_new_subsystem(const char *name) | |||
1292 | return system; | 1406 | return system; |
1293 | 1407 | ||
1294 | out_free: | 1408 | out_free: |
1409 | if (system->ref_count & SYSTEM_FL_FREE_NAME) | ||
1410 | kfree(system->name); | ||
1295 | kfree(system); | 1411 | kfree(system); |
1296 | return NULL; | 1412 | return NULL; |
1297 | } | 1413 | } |
@@ -1410,8 +1526,8 @@ event_create_dir(struct dentry *parent, | |||
1410 | 1526 | ||
1411 | #ifdef CONFIG_PERF_EVENTS | 1527 | #ifdef CONFIG_PERF_EVENTS |
1412 | if (call->event.type && call->class->reg) | 1528 | if (call->event.type && call->class->reg) |
1413 | trace_create_file("id", 0444, file->dir, call, | 1529 | trace_create_file("id", 0444, file->dir, |
1414 | id); | 1530 | (void *)(long)call->event.type, id); |
1415 | #endif | 1531 | #endif |
1416 | 1532 | ||
1417 | /* | 1533 | /* |
@@ -1436,33 +1552,16 @@ event_create_dir(struct dentry *parent, | |||
1436 | return 0; | 1552 | return 0; |
1437 | } | 1553 | } |
1438 | 1554 | ||
1439 | static void remove_subsystem(struct ftrace_subsystem_dir *dir) | ||
1440 | { | ||
1441 | if (!dir) | ||
1442 | return; | ||
1443 | |||
1444 | if (!--dir->nr_events) { | ||
1445 | debugfs_remove_recursive(dir->entry); | ||
1446 | list_del(&dir->list); | ||
1447 | __put_system_dir(dir); | ||
1448 | } | ||
1449 | } | ||
1450 | |||
1451 | static void remove_event_from_tracers(struct ftrace_event_call *call) | 1555 | static void remove_event_from_tracers(struct ftrace_event_call *call) |
1452 | { | 1556 | { |
1453 | struct ftrace_event_file *file; | 1557 | struct ftrace_event_file *file; |
1454 | struct trace_array *tr; | 1558 | struct trace_array *tr; |
1455 | 1559 | ||
1456 | do_for_each_event_file_safe(tr, file) { | 1560 | do_for_each_event_file_safe(tr, file) { |
1457 | |||
1458 | if (file->event_call != call) | 1561 | if (file->event_call != call) |
1459 | continue; | 1562 | continue; |
1460 | 1563 | ||
1461 | list_del(&file->list); | 1564 | remove_event_file_dir(file); |
1462 | debugfs_remove_recursive(file->dir); | ||
1463 | remove_subsystem(file->system); | ||
1464 | kmem_cache_free(file_cachep, file); | ||
1465 | |||
1466 | /* | 1565 | /* |
1467 | * The do_for_each_event_file_safe() is | 1566 | * The do_for_each_event_file_safe() is |
1468 | * a double loop. After finding the call for this | 1567 | * a double loop. After finding the call for this |
@@ -1591,6 +1690,7 @@ static void __add_event_to_tracers(struct ftrace_event_call *call, | |||
1591 | int trace_add_event_call(struct ftrace_event_call *call) | 1690 | int trace_add_event_call(struct ftrace_event_call *call) |
1592 | { | 1691 | { |
1593 | int ret; | 1692 | int ret; |
1693 | mutex_lock(&trace_types_lock); | ||
1594 | mutex_lock(&event_mutex); | 1694 | mutex_lock(&event_mutex); |
1595 | 1695 | ||
1596 | ret = __register_event(call, NULL); | 1696 | ret = __register_event(call, NULL); |
@@ -1598,11 +1698,13 @@ int trace_add_event_call(struct ftrace_event_call *call) | |||
1598 | __add_event_to_tracers(call, NULL); | 1698 | __add_event_to_tracers(call, NULL); |
1599 | 1699 | ||
1600 | mutex_unlock(&event_mutex); | 1700 | mutex_unlock(&event_mutex); |
1701 | mutex_unlock(&trace_types_lock); | ||
1601 | return ret; | 1702 | return ret; |
1602 | } | 1703 | } |
1603 | 1704 | ||
1604 | /* | 1705 | /* |
1605 | * Must be called under locking both of event_mutex and trace_event_sem. | 1706 | * Must be called under locking of trace_types_lock, event_mutex and |
1707 | * trace_event_sem. | ||
1606 | */ | 1708 | */ |
1607 | static void __trace_remove_event_call(struct ftrace_event_call *call) | 1709 | static void __trace_remove_event_call(struct ftrace_event_call *call) |
1608 | { | 1710 | { |
@@ -1611,14 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call) | |||
1611 | destroy_preds(call); | 1713 | destroy_preds(call); |
1612 | } | 1714 | } |
1613 | 1715 | ||
1716 | static int probe_remove_event_call(struct ftrace_event_call *call) | ||
1717 | { | ||
1718 | struct trace_array *tr; | ||
1719 | struct ftrace_event_file *file; | ||
1720 | |||
1721 | #ifdef CONFIG_PERF_EVENTS | ||

1722 | if (call->perf_refcount) | ||
1723 | return -EBUSY; | ||
1724 | #endif | ||
1725 | do_for_each_event_file(tr, file) { | ||
1726 | if (file->event_call != call) | ||
1727 | continue; | ||
1728 | /* | ||
1729 | * We can't rely on the ftrace_event_enable_disable(enable => 0) | ||
1730 | * we are about to do; FTRACE_EVENT_FL_SOFT_MODE can suppress | ||
1731 | * TRACE_REG_UNREGISTER. | ||
1732 | */ | ||
1733 | if (file->flags & FTRACE_EVENT_FL_ENABLED) | ||
1734 | return -EBUSY; | ||
1735 | /* | ||
1736 | * The do_for_each_event_file() is | ||
1737 | * a double loop. After finding the call for this | ||
1738 | * trace_array, we use break to jump to the next | ||
1739 | * trace_array. | ||
1740 | */ | ||
1741 | break; | ||
1742 | } while_for_each_event_file(); | ||
1743 | |||
1744 | __trace_remove_event_call(call); | ||
1745 | |||
1746 | return 0; | ||
1747 | } | ||
1748 | |||
1614 | /* Remove an event_call */ | 1749 | /* Remove an event_call */ |
1615 | void trace_remove_event_call(struct ftrace_event_call *call) | 1750 | int trace_remove_event_call(struct ftrace_event_call *call) |
1616 | { | 1751 | { |
1752 | int ret; | ||
1753 | |||
1754 | mutex_lock(&trace_types_lock); | ||
1617 | mutex_lock(&event_mutex); | 1755 | mutex_lock(&event_mutex); |
1618 | down_write(&trace_event_sem); | 1756 | down_write(&trace_event_sem); |
1619 | __trace_remove_event_call(call); | 1757 | ret = probe_remove_event_call(call); |
1620 | up_write(&trace_event_sem); | 1758 | up_write(&trace_event_sem); |
1621 | mutex_unlock(&event_mutex); | 1759 | mutex_unlock(&event_mutex); |
1760 | mutex_unlock(&trace_types_lock); | ||
1761 | |||
1762 | return ret; | ||
1622 | } | 1763 | } |
1623 | 1764 | ||
1624 | #define for_each_event(event, start, end) \ | 1765 | #define for_each_event(event, start, end) \ |
@@ -1762,6 +1903,7 @@ static int trace_module_notify(struct notifier_block *self, | |||
1762 | { | 1903 | { |
1763 | struct module *mod = data; | 1904 | struct module *mod = data; |
1764 | 1905 | ||
1906 | mutex_lock(&trace_types_lock); | ||
1765 | mutex_lock(&event_mutex); | 1907 | mutex_lock(&event_mutex); |
1766 | switch (val) { | 1908 | switch (val) { |
1767 | case MODULE_STATE_COMING: | 1909 | case MODULE_STATE_COMING: |
@@ -1772,6 +1914,7 @@ static int trace_module_notify(struct notifier_block *self, | |||
1772 | break; | 1914 | break; |
1773 | } | 1915 | } |
1774 | mutex_unlock(&event_mutex); | 1916 | mutex_unlock(&event_mutex); |
1917 | mutex_unlock(&trace_types_lock); | ||
1775 | 1918 | ||
1776 | return 0; | 1919 | return 0; |
1777 | } | 1920 | } |
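Several hunks in this file make trace_types_lock an outer lock around event_mutex — trace_add_event_call(), trace_remove_event_call(), and the module notifier above all take it first and release it last. What matters for correctness is that every path nests the two mutexes in the same order; a single consistent order is what rules out ABBA deadlocks. The sketch below shows only that rule, with pthread mutexes and illustrative names, not the kernel locking itself:

```c
#include <pthread.h>
#include <stdio.h>

/* Outer and inner locks; the nesting order below is the only one allowed. */
static pthread_mutex_t types_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

static int shared_state;

/* Every writer takes types_lock first, then event_lock. */
static void add_event(void)
{
	pthread_mutex_lock(&types_lock);
	pthread_mutex_lock(&event_lock);
	shared_state++;
	pthread_mutex_unlock(&event_lock);
	pthread_mutex_unlock(&types_lock);
}

static void remove_event(void)
{
	pthread_mutex_lock(&types_lock);	/* same order: no ABBA deadlock */
	pthread_mutex_lock(&event_lock);
	shared_state--;
	pthread_mutex_unlock(&event_lock);
	pthread_mutex_unlock(&types_lock);
}

int main(void)
{
	add_event();
	remove_event();
	printf("state = %d\n", shared_state);
	return 0;
}
```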
@@ -2011,10 +2154,7 @@ event_enable_func(struct ftrace_hash *hash, | |||
2011 | int ret; | 2154 | int ret; |
2012 | 2155 | ||
2013 | /* hash funcs only work with set_ftrace_filter */ | 2156 | /* hash funcs only work with set_ftrace_filter */ |
2014 | if (!enabled) | 2157 | if (!enabled || !param) |
2015 | return -EINVAL; | ||
2016 | |||
2017 | if (!param) | ||
2018 | return -EINVAL; | 2158 | return -EINVAL; |
2019 | 2159 | ||
2020 | system = strsep(&param, ":"); | 2160 | system = strsep(&param, ":"); |
@@ -2188,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr) | |||
2188 | { | 2328 | { |
2189 | struct ftrace_event_file *file, *next; | 2329 | struct ftrace_event_file *file, *next; |
2190 | 2330 | ||
2191 | list_for_each_entry_safe(file, next, &tr->events, list) { | 2331 | list_for_each_entry_safe(file, next, &tr->events, list) |
2192 | list_del(&file->list); | 2332 | remove_event_file_dir(file); |
2193 | debugfs_remove_recursive(file->dir); | ||
2194 | remove_subsystem(file->system); | ||
2195 | kmem_cache_free(file_cachep, file); | ||
2196 | } | ||
2197 | } | 2333 | } |
2198 | 2334 | ||
2199 | static void | 2335 | static void |
@@ -2329,11 +2465,11 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr) | |||
2329 | 2465 | ||
2330 | int event_trace_del_tracer(struct trace_array *tr) | 2466 | int event_trace_del_tracer(struct trace_array *tr) |
2331 | { | 2467 | { |
2332 | /* Disable any running events */ | ||
2333 | __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); | ||
2334 | |||
2335 | mutex_lock(&event_mutex); | 2468 | mutex_lock(&event_mutex); |
2336 | 2469 | ||
2470 | /* Disable any running events */ | ||
2471 | __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); | ||
2472 | |||
2337 | down_write(&trace_event_sem); | 2473 | down_write(&trace_event_sem); |
2338 | __trace_remove_event_dirs(tr); | 2474 | __trace_remove_event_dirs(tr); |
2339 | debugfs_remove_recursive(tr->event_dir); | 2475 | debugfs_remove_recursive(tr->event_dir); |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index e1b653f7e1ca..97daa8cf958d 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -44,6 +44,7 @@ enum filter_op_ids | |||
44 | OP_LE, | 44 | OP_LE, |
45 | OP_GT, | 45 | OP_GT, |
46 | OP_GE, | 46 | OP_GE, |
47 | OP_BAND, | ||
47 | OP_NONE, | 48 | OP_NONE, |
48 | OP_OPEN_PAREN, | 49 | OP_OPEN_PAREN, |
49 | }; | 50 | }; |
@@ -54,6 +55,7 @@ struct filter_op { | |||
54 | int precedence; | 55 | int precedence; |
55 | }; | 56 | }; |
56 | 57 | ||
58 | /* Order must be the same as enum filter_op_ids above */ | ||
57 | static struct filter_op filter_ops[] = { | 59 | static struct filter_op filter_ops[] = { |
58 | { OP_OR, "||", 1 }, | 60 | { OP_OR, "||", 1 }, |
59 | { OP_AND, "&&", 2 }, | 61 | { OP_AND, "&&", 2 }, |
@@ -64,6 +66,7 @@ static struct filter_op filter_ops[] = { | |||
64 | { OP_LE, "<=", 5 }, | 66 | { OP_LE, "<=", 5 }, |
65 | { OP_GT, ">", 5 }, | 67 | { OP_GT, ">", 5 }, |
66 | { OP_GE, ">=", 5 }, | 68 | { OP_GE, ">=", 5 }, |
69 | { OP_BAND, "&", 6 }, | ||
67 | { OP_NONE, "OP_NONE", 0 }, | 70 | { OP_NONE, "OP_NONE", 0 }, |
68 | { OP_OPEN_PAREN, "(", 0 }, | 71 | { OP_OPEN_PAREN, "(", 0 }, |
69 | }; | 72 | }; |
@@ -156,6 +159,9 @@ static int filter_pred_##type(struct filter_pred *pred, void *event) \ | |||
156 | case OP_GE: \ | 159 | case OP_GE: \ |
157 | match = (*addr >= val); \ | 160 | match = (*addr >= val); \ |
158 | break; \ | 161 | break; \ |
162 | case OP_BAND: \ | ||
163 | match = (*addr & val); \ | ||
164 | break; \ | ||
159 | default: \ | 165 | default: \ |
160 | break; \ | 166 | break; \ |
161 | } \ | 167 | } \ |
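The new OP_BAND entry lets an event filter test individual bits: the predicate matches when `field & value` is non-zero, so (assuming a numeric field named `flags`) a filter string such as `flags & 4` would select only events with bit 2 set. The sketch below mirrors one arm of the filter_pred_##type() macro for a 32-bit field; it is an illustration of the comparison logic, not the kernel code:

```c
#include <stdio.h>

enum { OP_EQ, OP_NE, OP_GT, OP_BAND };	/* trimmed-down op list */

/* One specialisation of the predicate, for an unsigned 32-bit field. */
static int filter_pred_u32(int op, unsigned int field, unsigned int val)
{
	int match = 0;

	switch (op) {
	case OP_EQ:
		match = (field == val);
		break;
	case OP_NE:
		match = (field != val);
		break;
	case OP_GT:
		match = (field > val);
		break;
	case OP_BAND:
		match = ((field & val) != 0);	/* any masked bit set */
		break;
	}
	return match;
}

int main(void)
{
	unsigned int flags = 0x6;	/* bits 1 and 2 set */

	printf("flags & 4 -> %d\n", filter_pred_u32(OP_BAND, flags, 0x4));
	printf("flags & 8 -> %d\n", filter_pred_u32(OP_BAND, flags, 0x8));
	return 0;
}
```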
@@ -631,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps, | |||
631 | free_page((unsigned long) buf); | 637 | free_page((unsigned long) buf); |
632 | } | 638 | } |
633 | 639 | ||
640 | /* caller must hold event_mutex */ | ||
634 | void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) | 641 | void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) |
635 | { | 642 | { |
636 | struct event_filter *filter; | 643 | struct event_filter *filter = call->filter; |
637 | 644 | ||
638 | mutex_lock(&event_mutex); | ||
639 | filter = call->filter; | ||
640 | if (filter && filter->filter_string) | 645 | if (filter && filter->filter_string) |
641 | trace_seq_printf(s, "%s\n", filter->filter_string); | 646 | trace_seq_printf(s, "%s\n", filter->filter_string); |
642 | else | 647 | else |
643 | trace_seq_printf(s, "none\n"); | 648 | trace_seq_puts(s, "none\n"); |
644 | mutex_unlock(&event_mutex); | ||
645 | } | 649 | } |
646 | 650 | ||
647 | void print_subsystem_event_filter(struct event_subsystem *system, | 651 | void print_subsystem_event_filter(struct event_subsystem *system, |
@@ -654,7 +658,7 @@ void print_subsystem_event_filter(struct event_subsystem *system, | |||
654 | if (filter && filter->filter_string) | 658 | if (filter && filter->filter_string) |
655 | trace_seq_printf(s, "%s\n", filter->filter_string); | 659 | trace_seq_printf(s, "%s\n", filter->filter_string); |
656 | else | 660 | else |
657 | trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); | 661 | trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); |
658 | mutex_unlock(&event_mutex); | 662 | mutex_unlock(&event_mutex); |
659 | } | 663 | } |
660 | 664 | ||
@@ -1835,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system, | |||
1835 | return err; | 1839 | return err; |
1836 | } | 1840 | } |
1837 | 1841 | ||
1842 | /* caller must hold event_mutex */ | ||
1838 | int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | 1843 | int apply_event_filter(struct ftrace_event_call *call, char *filter_string) |
1839 | { | 1844 | { |
1840 | struct event_filter *filter; | 1845 | struct event_filter *filter; |
1841 | int err = 0; | 1846 | int err; |
1842 | |||
1843 | mutex_lock(&event_mutex); | ||
1844 | 1847 | ||
1845 | if (!strcmp(strstrip(filter_string), "0")) { | 1848 | if (!strcmp(strstrip(filter_string), "0")) { |
1846 | filter_disable(call); | 1849 | filter_disable(call); |
1847 | filter = call->filter; | 1850 | filter = call->filter; |
1848 | if (!filter) | 1851 | if (!filter) |
1849 | goto out_unlock; | 1852 | return 0; |
1850 | RCU_INIT_POINTER(call->filter, NULL); | 1853 | RCU_INIT_POINTER(call->filter, NULL); |
1851 | /* Make sure the filter is not being used */ | 1854 | /* Make sure the filter is not being used */ |
1852 | synchronize_sched(); | 1855 | synchronize_sched(); |
1853 | __free_filter(filter); | 1856 | __free_filter(filter); |
1854 | goto out_unlock; | 1857 | return 0; |
1855 | } | 1858 | } |
1856 | 1859 | ||
1857 | err = create_filter(call, filter_string, true, &filter); | 1860 | err = create_filter(call, filter_string, true, &filter); |
@@ -1878,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | |||
1878 | __free_filter(tmp); | 1881 | __free_filter(tmp); |
1879 | } | 1882 | } |
1880 | } | 1883 | } |
1881 | out_unlock: | ||
1882 | mutex_unlock(&event_mutex); | ||
1883 | 1884 | ||
1884 | return err; | 1885 | return err; |
1885 | } | 1886 | } |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index c4d6d7191988..38fe1483c508 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -199,7 +199,7 @@ static int func_set_flag(u32 old_flags, u32 bit, int set) | |||
199 | return 0; | 199 | return 0; |
200 | } | 200 | } |
201 | 201 | ||
202 | static struct tracer function_trace __read_mostly = | 202 | static struct tracer function_trace __tracer_data = |
203 | { | 203 | { |
204 | .name = "function", | 204 | .name = "function", |
205 | .init = function_trace_init, | 205 | .init = function_trace_init, |
@@ -290,6 +290,21 @@ ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) | |||
290 | trace_dump_stack(STACK_SKIP); | 290 | trace_dump_stack(STACK_SKIP); |
291 | } | 291 | } |
292 | 292 | ||
293 | static void | ||
294 | ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data) | ||
295 | { | ||
296 | if (update_count(data)) | ||
297 | ftrace_dump(DUMP_ALL); | ||
298 | } | ||
299 | |||
300 | /* Only dump the current CPU buffer. */ | ||
301 | static void | ||
302 | ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data) | ||
303 | { | ||
304 | if (update_count(data)) | ||
305 | ftrace_dump(DUMP_ORIG); | ||
306 | } | ||
307 | |||
293 | static int | 308 | static int |
294 | ftrace_probe_print(const char *name, struct seq_file *m, | 309 | ftrace_probe_print(const char *name, struct seq_file *m, |
295 | unsigned long ip, void *data) | 310 | unsigned long ip, void *data) |
@@ -327,6 +342,20 @@ ftrace_stacktrace_print(struct seq_file *m, unsigned long ip, | |||
327 | return ftrace_probe_print("stacktrace", m, ip, data); | 342 | return ftrace_probe_print("stacktrace", m, ip, data); |
328 | } | 343 | } |
329 | 344 | ||
345 | static int | ||
346 | ftrace_dump_print(struct seq_file *m, unsigned long ip, | ||
347 | struct ftrace_probe_ops *ops, void *data) | ||
348 | { | ||
349 | return ftrace_probe_print("dump", m, ip, data); | ||
350 | } | ||
351 | |||
352 | static int | ||
353 | ftrace_cpudump_print(struct seq_file *m, unsigned long ip, | ||
354 | struct ftrace_probe_ops *ops, void *data) | ||
355 | { | ||
356 | return ftrace_probe_print("cpudump", m, ip, data); | ||
357 | } | ||
358 | |||
330 | static struct ftrace_probe_ops traceon_count_probe_ops = { | 359 | static struct ftrace_probe_ops traceon_count_probe_ops = { |
331 | .func = ftrace_traceon_count, | 360 | .func = ftrace_traceon_count, |
332 | .print = ftrace_traceon_print, | 361 | .print = ftrace_traceon_print, |
@@ -342,6 +371,16 @@ static struct ftrace_probe_ops stacktrace_count_probe_ops = { | |||
342 | .print = ftrace_stacktrace_print, | 371 | .print = ftrace_stacktrace_print, |
343 | }; | 372 | }; |
344 | 373 | ||
374 | static struct ftrace_probe_ops dump_probe_ops = { | ||
375 | .func = ftrace_dump_probe, | ||
376 | .print = ftrace_dump_print, | ||
377 | }; | ||
378 | |||
379 | static struct ftrace_probe_ops cpudump_probe_ops = { | ||
380 | .func = ftrace_cpudump_probe, | ||
381 | .print = ftrace_cpudump_print, | ||
382 | }; | ||
383 | |||
345 | static struct ftrace_probe_ops traceon_probe_ops = { | 384 | static struct ftrace_probe_ops traceon_probe_ops = { |
346 | .func = ftrace_traceon, | 385 | .func = ftrace_traceon, |
347 | .print = ftrace_traceon_print, | 386 | .print = ftrace_traceon_print, |
@@ -425,6 +464,32 @@ ftrace_stacktrace_callback(struct ftrace_hash *hash, | |||
425 | param, enable); | 464 | param, enable); |
426 | } | 465 | } |
427 | 466 | ||
467 | static int | ||
468 | ftrace_dump_callback(struct ftrace_hash *hash, | ||
469 | char *glob, char *cmd, char *param, int enable) | ||
470 | { | ||
471 | struct ftrace_probe_ops *ops; | ||
472 | |||
473 | ops = &dump_probe_ops; | ||
474 | |||
475 | /* Only dump once. */ | ||
476 | return ftrace_trace_probe_callback(ops, hash, glob, cmd, | ||
477 | "1", enable); | ||
478 | } | ||
479 | |||
480 | static int | ||
481 | ftrace_cpudump_callback(struct ftrace_hash *hash, | ||
482 | char *glob, char *cmd, char *param, int enable) | ||
483 | { | ||
484 | struct ftrace_probe_ops *ops; | ||
485 | |||
486 | ops = &cpudump_probe_ops; | ||
487 | |||
488 | /* Only dump once. */ | ||
489 | return ftrace_trace_probe_callback(ops, hash, glob, cmd, | ||
490 | "1", enable); | ||
491 | } | ||
492 | |||
428 | static struct ftrace_func_command ftrace_traceon_cmd = { | 493 | static struct ftrace_func_command ftrace_traceon_cmd = { |
429 | .name = "traceon", | 494 | .name = "traceon", |
430 | .func = ftrace_trace_onoff_callback, | 495 | .func = ftrace_trace_onoff_callback, |
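The two new commands plug into the same function-probe syntax as traceon/traceoff/stacktrace, so writing something like `schedule:dump` to set_ftrace_filter should arm a one-shot buffer dump the first time schedule() is hit, and `:cpudump` the per-CPU variant. The snippet below is a hedged sketch of arming it from C; the debugfs path and the exact command string are assumptions inferred from the existing probe commands, not something this diff spells out:

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed debugfs location; adjust if tracing is mounted elsewhere. */
	const char *path = "/sys/kernel/debug/tracing/set_ftrace_filter";
	const char *cmd = "schedule:dump";	/* one-shot dump when schedule() hits */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}
```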
@@ -440,6 +505,16 @@ static struct ftrace_func_command ftrace_stacktrace_cmd = { | |||
440 | .func = ftrace_stacktrace_callback, | 505 | .func = ftrace_stacktrace_callback, |
441 | }; | 506 | }; |
442 | 507 | ||
508 | static struct ftrace_func_command ftrace_dump_cmd = { | ||
509 | .name = "dump", | ||
510 | .func = ftrace_dump_callback, | ||
511 | }; | ||
512 | |||
513 | static struct ftrace_func_command ftrace_cpudump_cmd = { | ||
514 | .name = "cpudump", | ||
515 | .func = ftrace_cpudump_callback, | ||
516 | }; | ||
517 | |||
443 | static int __init init_func_cmd_traceon(void) | 518 | static int __init init_func_cmd_traceon(void) |
444 | { | 519 | { |
445 | int ret; | 520 | int ret; |
@@ -450,13 +525,31 @@ static int __init init_func_cmd_traceon(void) | |||
450 | 525 | ||
451 | ret = register_ftrace_command(&ftrace_traceon_cmd); | 526 | ret = register_ftrace_command(&ftrace_traceon_cmd); |
452 | if (ret) | 527 | if (ret) |
453 | unregister_ftrace_command(&ftrace_traceoff_cmd); | 528 | goto out_free_traceoff; |
454 | 529 | ||
455 | ret = register_ftrace_command(&ftrace_stacktrace_cmd); | 530 | ret = register_ftrace_command(&ftrace_stacktrace_cmd); |
456 | if (ret) { | 531 | if (ret) |
457 | unregister_ftrace_command(&ftrace_traceoff_cmd); | 532 | goto out_free_traceon; |
458 | unregister_ftrace_command(&ftrace_traceon_cmd); | 533 | |
459 | } | 534 | ret = register_ftrace_command(&ftrace_dump_cmd); |
535 | if (ret) | ||
536 | goto out_free_stacktrace; | ||
537 | |||
538 | ret = register_ftrace_command(&ftrace_cpudump_cmd); | ||
539 | if (ret) | ||
540 | goto out_free_dump; | ||
541 | |||
542 | return 0; | ||
543 | |||
544 | out_free_dump: | ||
545 | unregister_ftrace_command(&ftrace_dump_cmd); | ||
546 | out_free_stacktrace: | ||
547 | unregister_ftrace_command(&ftrace_stacktrace_cmd); | ||
548 | out_free_traceon: | ||
549 | unregister_ftrace_command(&ftrace_traceon_cmd); | ||
550 | out_free_traceoff: | ||
551 | unregister_ftrace_command(&ftrace_traceoff_cmd); | ||
552 | |||
460 | return ret; | 553 | return ret; |
461 | } | 554 | } |
462 | #else | 555 | #else |
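The registration path above is rewritten from ad-hoc cleanup into the usual goto-unwind ladder: each successfully registered command gets an out_free_* label, and a later failure jumps to the label that unregisters everything registered so far, in reverse order. A generic sketch of that pattern with stubbed register/unregister calls (names are illustrative only):

```c
#include <stdio.h>

/* Stubs for register_ftrace_command()/unregister_ftrace_command(). */
static int register_cmd(const char *name)    { printf("register %s\n", name); return 0; }
static void unregister_cmd(const char *name) { printf("unregister %s\n", name); }

static int init_cmds(void)
{
	int ret;

	ret = register_cmd("traceoff");
	if (ret)
		return ret;

	ret = register_cmd("traceon");
	if (ret)
		goto out_free_traceoff;

	ret = register_cmd("stacktrace");
	if (ret)
		goto out_free_traceon;

	ret = register_cmd("dump");
	if (ret)
		goto out_free_stacktrace;

	return 0;

out_free_stacktrace:		/* unwind in reverse registration order */
	unregister_cmd("stacktrace");
out_free_traceon:
	unregister_cmd("traceon");
out_free_traceoff:
	unregister_cmd("traceoff");
	return ret;
}

int main(void)
{
	return init_cmds();
}
```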
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 8388bc99f2ee..b5c09242683d 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -446,7 +446,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
446 | 446 | ||
447 | /* First spaces to align center */ | 447 | /* First spaces to align center */ |
448 | for (i = 0; i < spaces / 2; i++) { | 448 | for (i = 0; i < spaces / 2; i++) { |
449 | ret = trace_seq_printf(s, " "); | 449 | ret = trace_seq_putc(s, ' '); |
450 | if (!ret) | 450 | if (!ret) |
451 | return TRACE_TYPE_PARTIAL_LINE; | 451 | return TRACE_TYPE_PARTIAL_LINE; |
452 | } | 452 | } |
@@ -457,7 +457,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
457 | 457 | ||
458 | /* Last spaces to align center */ | 458 | /* Last spaces to align center */ |
459 | for (i = 0; i < spaces - (spaces / 2); i++) { | 459 | for (i = 0; i < spaces - (spaces / 2); i++) { |
460 | ret = trace_seq_printf(s, " "); | 460 | ret = trace_seq_putc(s, ' '); |
461 | if (!ret) | 461 | if (!ret) |
462 | return TRACE_TYPE_PARTIAL_LINE; | 462 | return TRACE_TYPE_PARTIAL_LINE; |
463 | } | 463 | } |
@@ -503,7 +503,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
503 | ------------------------------------------ | 503 | ------------------------------------------ |
504 | 504 | ||
505 | */ | 505 | */ |
506 | ret = trace_seq_printf(s, | 506 | ret = trace_seq_puts(s, |
507 | " ------------------------------------------\n"); | 507 | " ------------------------------------------\n"); |
508 | if (!ret) | 508 | if (!ret) |
509 | return TRACE_TYPE_PARTIAL_LINE; | 509 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -516,7 +516,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
516 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 516 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
517 | return TRACE_TYPE_PARTIAL_LINE; | 517 | return TRACE_TYPE_PARTIAL_LINE; |
518 | 518 | ||
519 | ret = trace_seq_printf(s, " => "); | 519 | ret = trace_seq_puts(s, " => "); |
520 | if (!ret) | 520 | if (!ret) |
521 | return TRACE_TYPE_PARTIAL_LINE; | 521 | return TRACE_TYPE_PARTIAL_LINE; |
522 | 522 | ||
@@ -524,7 +524,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
524 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 524 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
525 | return TRACE_TYPE_PARTIAL_LINE; | 525 | return TRACE_TYPE_PARTIAL_LINE; |
526 | 526 | ||
527 | ret = trace_seq_printf(s, | 527 | ret = trace_seq_puts(s, |
528 | "\n ------------------------------------------\n\n"); | 528 | "\n ------------------------------------------\n\n"); |
529 | if (!ret) | 529 | if (!ret) |
530 | return TRACE_TYPE_PARTIAL_LINE; | 530 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -645,7 +645,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
645 | ret = print_graph_proc(s, pid); | 645 | ret = print_graph_proc(s, pid); |
646 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 646 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
647 | return TRACE_TYPE_PARTIAL_LINE; | 647 | return TRACE_TYPE_PARTIAL_LINE; |
648 | ret = trace_seq_printf(s, " | "); | 648 | ret = trace_seq_puts(s, " | "); |
649 | if (!ret) | 649 | if (!ret) |
650 | return TRACE_TYPE_PARTIAL_LINE; | 650 | return TRACE_TYPE_PARTIAL_LINE; |
651 | } | 651 | } |
@@ -657,9 +657,9 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
657 | return ret; | 657 | return ret; |
658 | 658 | ||
659 | if (type == TRACE_GRAPH_ENT) | 659 | if (type == TRACE_GRAPH_ENT) |
660 | ret = trace_seq_printf(s, "==========>"); | 660 | ret = trace_seq_puts(s, "==========>"); |
661 | else | 661 | else |
662 | ret = trace_seq_printf(s, "<=========="); | 662 | ret = trace_seq_puts(s, "<=========="); |
663 | 663 | ||
664 | if (!ret) | 664 | if (!ret) |
665 | return TRACE_TYPE_PARTIAL_LINE; | 665 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -668,7 +668,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
668 | if (ret != TRACE_TYPE_HANDLED) | 668 | if (ret != TRACE_TYPE_HANDLED) |
669 | return ret; | 669 | return ret; |
670 | 670 | ||
671 | ret = trace_seq_printf(s, "\n"); | 671 | ret = trace_seq_putc(s, '\n'); |
672 | 672 | ||
673 | if (!ret) | 673 | if (!ret) |
674 | return TRACE_TYPE_PARTIAL_LINE; | 674 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -705,13 +705,13 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
705 | len += strlen(nsecs_str); | 705 | len += strlen(nsecs_str); |
706 | } | 706 | } |
707 | 707 | ||
708 | ret = trace_seq_printf(s, " us "); | 708 | ret = trace_seq_puts(s, " us "); |
709 | if (!ret) | 709 | if (!ret) |
710 | return TRACE_TYPE_PARTIAL_LINE; | 710 | return TRACE_TYPE_PARTIAL_LINE; |
711 | 711 | ||
712 | /* Print remaining spaces to fit the row's width */ | 712 | /* Print remaining spaces to fit the row's width */ |
713 | for (i = len; i < 7; i++) { | 713 | for (i = len; i < 7; i++) { |
714 | ret = trace_seq_printf(s, " "); | 714 | ret = trace_seq_putc(s, ' '); |
715 | if (!ret) | 715 | if (!ret) |
716 | return TRACE_TYPE_PARTIAL_LINE; | 716 | return TRACE_TYPE_PARTIAL_LINE; |
717 | } | 717 | } |
@@ -731,13 +731,13 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
731 | /* No real data, just filling the column with spaces */ | 731 | /* No real data, just filling the column with spaces */ |
732 | switch (duration) { | 732 | switch (duration) { |
733 | case DURATION_FILL_FULL: | 733 | case DURATION_FILL_FULL: |
734 | ret = trace_seq_printf(s, " | "); | 734 | ret = trace_seq_puts(s, " | "); |
735 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 735 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
736 | case DURATION_FILL_START: | 736 | case DURATION_FILL_START: |
737 | ret = trace_seq_printf(s, " "); | 737 | ret = trace_seq_puts(s, " "); |
738 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 738 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
739 | case DURATION_FILL_END: | 739 | case DURATION_FILL_END: |
740 | ret = trace_seq_printf(s, " |"); | 740 | ret = trace_seq_puts(s, " |"); |
741 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 741 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
742 | } | 742 | } |
743 | 743 | ||
@@ -745,10 +745,10 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
745 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { | 745 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { |
746 | /* Duration exceeded 100 msecs */ | 746 | /* Duration exceeded 100 msecs */ |
747 | if (duration > 100000ULL) | 747 | if (duration > 100000ULL) |
748 | ret = trace_seq_printf(s, "! "); | 748 | ret = trace_seq_puts(s, "! "); |
749 | /* Duration exceeded 10 msecs */ | 749 | /* Duration exceeded 10 msecs */ |
750 | else if (duration > 10000ULL) | 750 | else if (duration > 10000ULL) |
751 | ret = trace_seq_printf(s, "+ "); | 751 | ret = trace_seq_puts(s, "+ "); |
752 | } | 752 | } |
753 | 753 | ||
754 | /* | 754 | /* |
@@ -757,7 +757,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
757 | * to fill out the space. | 757 | * to fill out the space. |
758 | */ | 758 | */ |
759 | if (ret == -1) | 759 | if (ret == -1) |
760 | ret = trace_seq_printf(s, " "); | 760 | ret = trace_seq_puts(s, " "); |
761 | 761 | ||
762 | /* Catching here any failure that happened above */ | 762 | /* Catching here any failure that happened above */ |
763 | if (!ret) | 763 | if (!ret) |
@@ -767,7 +767,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
767 | if (ret != TRACE_TYPE_HANDLED) | 767 | if (ret != TRACE_TYPE_HANDLED) |
768 | return ret; | 768 | return ret; |
769 | 769 | ||
770 | ret = trace_seq_printf(s, "| "); | 770 | ret = trace_seq_puts(s, "| "); |
771 | if (!ret) | 771 | if (!ret) |
772 | return TRACE_TYPE_PARTIAL_LINE; | 772 | return TRACE_TYPE_PARTIAL_LINE; |
773 | 773 | ||
@@ -817,7 +817,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
817 | 817 | ||
818 | /* Function */ | 818 | /* Function */ |
819 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 819 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
820 | ret = trace_seq_printf(s, " "); | 820 | ret = trace_seq_putc(s, ' '); |
821 | if (!ret) | 821 | if (!ret) |
822 | return TRACE_TYPE_PARTIAL_LINE; | 822 | return TRACE_TYPE_PARTIAL_LINE; |
823 | } | 823 | } |
@@ -858,7 +858,7 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
858 | 858 | ||
859 | /* Function */ | 859 | /* Function */ |
860 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 860 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
861 | ret = trace_seq_printf(s, " "); | 861 | ret = trace_seq_putc(s, ' '); |
862 | if (!ret) | 862 | if (!ret) |
863 | return TRACE_TYPE_PARTIAL_LINE; | 863 | return TRACE_TYPE_PARTIAL_LINE; |
864 | } | 864 | } |
@@ -917,7 +917,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
917 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 917 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
918 | return TRACE_TYPE_PARTIAL_LINE; | 918 | return TRACE_TYPE_PARTIAL_LINE; |
919 | 919 | ||
920 | ret = trace_seq_printf(s, " | "); | 920 | ret = trace_seq_puts(s, " | "); |
921 | if (!ret) | 921 | if (!ret) |
922 | return TRACE_TYPE_PARTIAL_LINE; | 922 | return TRACE_TYPE_PARTIAL_LINE; |
923 | } | 923 | } |
@@ -1117,7 +1117,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1117 | 1117 | ||
1118 | /* Closing brace */ | 1118 | /* Closing brace */ |
1119 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 1119 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
1120 | ret = trace_seq_printf(s, " "); | 1120 | ret = trace_seq_putc(s, ' '); |
1121 | if (!ret) | 1121 | if (!ret) |
1122 | return TRACE_TYPE_PARTIAL_LINE; | 1122 | return TRACE_TYPE_PARTIAL_LINE; |
1123 | } | 1123 | } |
@@ -1129,7 +1129,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1129 | * belongs to, write out the function name. | 1129 | * belongs to, write out the function name. |
1130 | */ | 1130 | */ |
1131 | if (func_match) { | 1131 | if (func_match) { |
1132 | ret = trace_seq_printf(s, "}\n"); | 1132 | ret = trace_seq_puts(s, "}\n"); |
1133 | if (!ret) | 1133 | if (!ret) |
1134 | return TRACE_TYPE_PARTIAL_LINE; | 1134 | return TRACE_TYPE_PARTIAL_LINE; |
1135 | } else { | 1135 | } else { |
@@ -1179,13 +1179,13 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1179 | /* Indentation */ | 1179 | /* Indentation */ |
1180 | if (depth > 0) | 1180 | if (depth > 0) |
1181 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | 1181 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { |
1182 | ret = trace_seq_printf(s, " "); | 1182 | ret = trace_seq_putc(s, ' '); |
1183 | if (!ret) | 1183 | if (!ret) |
1184 | return TRACE_TYPE_PARTIAL_LINE; | 1184 | return TRACE_TYPE_PARTIAL_LINE; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | /* The comment */ | 1187 | /* The comment */ |
1188 | ret = trace_seq_printf(s, "/* "); | 1188 | ret = trace_seq_puts(s, "/* "); |
1189 | if (!ret) | 1189 | if (!ret) |
1190 | return TRACE_TYPE_PARTIAL_LINE; | 1190 | return TRACE_TYPE_PARTIAL_LINE; |
1191 | 1191 | ||
@@ -1216,7 +1216,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1216 | s->len--; | 1216 | s->len--; |
1217 | } | 1217 | } |
1218 | 1218 | ||
1219 | ret = trace_seq_printf(s, " */\n"); | 1219 | ret = trace_seq_puts(s, " */\n"); |
1220 | if (!ret) | 1220 | if (!ret) |
1221 | return TRACE_TYPE_PARTIAL_LINE; | 1221 | return TRACE_TYPE_PARTIAL_LINE; |
1222 | 1222 | ||
@@ -1448,7 +1448,7 @@ static struct trace_event graph_trace_ret_event = { | |||
1448 | .funcs = &graph_functions | 1448 | .funcs = &graph_functions |
1449 | }; | 1449 | }; |
1450 | 1450 | ||
1451 | static struct tracer graph_trace __read_mostly = { | 1451 | static struct tracer graph_trace __tracer_data = { |
1452 | .name = "function_graph", | 1452 | .name = "function_graph", |
1453 | .open = graph_trace_open, | 1453 | .open = graph_trace_open, |
1454 | .pipe_open = graph_trace_open, | 1454 | .pipe_open = graph_trace_open, |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index b19d065a28cb..2aefbee93a6d 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -373,7 +373,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
373 | struct trace_array_cpu *data; | 373 | struct trace_array_cpu *data; |
374 | unsigned long flags; | 374 | unsigned long flags; |
375 | 375 | ||
376 | if (likely(!tracer_enabled)) | 376 | if (!tracer_enabled || !tracing_is_enabled()) |
377 | return; | 377 | return; |
378 | 378 | ||
379 | cpu = raw_smp_processor_id(); | 379 | cpu = raw_smp_processor_id(); |
@@ -416,7 +416,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
416 | else | 416 | else |
417 | return; | 417 | return; |
418 | 418 | ||
419 | if (!tracer_enabled) | 419 | if (!tracer_enabled || !tracing_is_enabled()) |
420 | return; | 420 | return; |
421 | 421 | ||
422 | data = per_cpu_ptr(tr->trace_buffer.data, cpu); | 422 | data = per_cpu_ptr(tr->trace_buffer.data, cpu); |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 9f46e98ba8f2..243f6834d026 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -35,12 +35,17 @@ struct trace_probe { | |||
35 | const char *symbol; /* symbol name */ | 35 | const char *symbol; /* symbol name */ |
36 | struct ftrace_event_class class; | 36 | struct ftrace_event_class class; |
37 | struct ftrace_event_call call; | 37 | struct ftrace_event_call call; |
38 | struct ftrace_event_file * __rcu *files; | 38 | struct list_head files; |
39 | ssize_t size; /* trace entry size */ | 39 | ssize_t size; /* trace entry size */ |
40 | unsigned int nr_args; | 40 | unsigned int nr_args; |
41 | struct probe_arg args[]; | 41 | struct probe_arg args[]; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | struct event_file_link { | ||
45 | struct ftrace_event_file *file; | ||
46 | struct list_head list; | ||
47 | }; | ||
48 | |||
44 | #define SIZEOF_TRACE_PROBE(n) \ | 49 | #define SIZEOF_TRACE_PROBE(n) \ |
45 | (offsetof(struct trace_probe, args) + \ | 50 | (offsetof(struct trace_probe, args) + \ |
46 | (sizeof(struct probe_arg) * (n))) | 51 | (sizeof(struct probe_arg) * (n))) |
@@ -90,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp) | |||
90 | } | 95 | } |
91 | 96 | ||
92 | static int register_probe_event(struct trace_probe *tp); | 97 | static int register_probe_event(struct trace_probe *tp); |
93 | static void unregister_probe_event(struct trace_probe *tp); | 98 | static int unregister_probe_event(struct trace_probe *tp); |
94 | 99 | ||
95 | static DEFINE_MUTEX(probe_lock); | 100 | static DEFINE_MUTEX(probe_lock); |
96 | static LIST_HEAD(probe_list); | 101 | static LIST_HEAD(probe_list); |
@@ -150,6 +155,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
150 | goto error; | 155 | goto error; |
151 | 156 | ||
152 | INIT_LIST_HEAD(&tp->list); | 157 | INIT_LIST_HEAD(&tp->list); |
158 | INIT_LIST_HEAD(&tp->files); | ||
153 | return tp; | 159 | return tp; |
154 | error: | 160 | error: |
155 | kfree(tp->call.name); | 161 | kfree(tp->call.name); |
@@ -183,25 +189,6 @@ static struct trace_probe *find_trace_probe(const char *event, | |||
183 | return NULL; | 189 | return NULL; |
184 | } | 190 | } |
185 | 191 | ||
186 | static int trace_probe_nr_files(struct trace_probe *tp) | ||
187 | { | ||
188 | struct ftrace_event_file **file; | ||
189 | int ret = 0; | ||
190 | |||
191 | /* | ||
192 | * Since all tp->files updater is protected by probe_enable_lock, | ||
193 | * we don't need to lock an rcu_read_lock. | ||
194 | */ | ||
195 | file = rcu_dereference_raw(tp->files); | ||
196 | if (file) | ||
197 | while (*(file++)) | ||
198 | ret++; | ||
199 | |||
200 | return ret; | ||
201 | } | ||
202 | |||
203 | static DEFINE_MUTEX(probe_enable_lock); | ||
204 | |||
205 | /* | 192 | /* |
206 | * Enable trace_probe | 193 | * Enable trace_probe |
207 | * if the file is NULL, enable "perf" handler, or enable "trace" handler. | 194 | * if the file is NULL, enable "perf" handler, or enable "trace" handler. |
@@ -211,67 +198,42 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
211 | { | 198 | { |
212 | int ret = 0; | 199 | int ret = 0; |
213 | 200 | ||
214 | mutex_lock(&probe_enable_lock); | ||
215 | |||
216 | if (file) { | 201 | if (file) { |
217 | struct ftrace_event_file **new, **old; | 202 | struct event_file_link *link; |
218 | int n = trace_probe_nr_files(tp); | 203 | |
219 | 204 | link = kmalloc(sizeof(*link), GFP_KERNEL); | |
220 | old = rcu_dereference_raw(tp->files); | 205 | if (!link) { |
221 | /* 1 is for new one and 1 is for stopper */ | ||
222 | new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *), | ||
223 | GFP_KERNEL); | ||
224 | if (!new) { | ||
225 | ret = -ENOMEM; | 206 | ret = -ENOMEM; |
226 | goto out_unlock; | 207 | goto out; |
227 | } | 208 | } |
228 | memcpy(new, old, n * sizeof(struct ftrace_event_file *)); | ||
229 | new[n] = file; | ||
230 | /* The last one keeps a NULL */ | ||
231 | 209 | ||
232 | rcu_assign_pointer(tp->files, new); | 210 | link->file = file; |
233 | tp->flags |= TP_FLAG_TRACE; | 211 | list_add_tail_rcu(&link->list, &tp->files); |
234 | 212 | ||
235 | if (old) { | 213 | tp->flags |= TP_FLAG_TRACE; |
236 | /* Make sure the probe is done with old files */ | ||
237 | synchronize_sched(); | ||
238 | kfree(old); | ||
239 | } | ||
240 | } else | 214 | } else |
241 | tp->flags |= TP_FLAG_PROFILE; | 215 | tp->flags |= TP_FLAG_PROFILE; |
242 | 216 | ||
243 | if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) && | 217 | if (trace_probe_is_registered(tp) && !trace_probe_has_gone(tp)) { |
244 | !trace_probe_has_gone(tp)) { | ||
245 | if (trace_probe_is_return(tp)) | 218 | if (trace_probe_is_return(tp)) |
246 | ret = enable_kretprobe(&tp->rp); | 219 | ret = enable_kretprobe(&tp->rp); |
247 | else | 220 | else |
248 | ret = enable_kprobe(&tp->rp.kp); | 221 | ret = enable_kprobe(&tp->rp.kp); |
249 | } | 222 | } |
250 | 223 | out: | |
251 | out_unlock: | ||
252 | mutex_unlock(&probe_enable_lock); | ||
253 | |||
254 | return ret; | 224 | return ret; |
255 | } | 225 | } |
256 | 226 | ||
257 | static int | 227 | static struct event_file_link * |
258 | trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) | 228 | find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) |
259 | { | 229 | { |
260 | struct ftrace_event_file **files; | 230 | struct event_file_link *link; |
261 | int i; | ||
262 | 231 | ||
263 | /* | 232 | list_for_each_entry(link, &tp->files, list) |
264 | * Since all tp->files updater is protected by probe_enable_lock, | 233 | if (link->file == file) |
265 | * we don't need to lock an rcu_read_lock. | 234 | return link; |
266 | */ | ||
267 | files = rcu_dereference_raw(tp->files); | ||
268 | if (files) { | ||
269 | for (i = 0; files[i]; i++) | ||
270 | if (files[i] == file) | ||
271 | return i; | ||
272 | } | ||
273 | 235 | ||
274 | return -1; | 236 | return NULL; |
275 | } | 237 | } |
276 | 238 | ||
277 | /* | 239 | /* |
@@ -281,43 +243,23 @@ trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) | |||
281 | static int | 243 | static int |
282 | disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | 244 | disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) |
283 | { | 245 | { |
246 | struct event_file_link *link = NULL; | ||
247 | int wait = 0; | ||
284 | int ret = 0; | 248 | int ret = 0; |
285 | 249 | ||
286 | mutex_lock(&probe_enable_lock); | ||
287 | |||
288 | if (file) { | 250 | if (file) { |
289 | struct ftrace_event_file **new, **old; | 251 | link = find_event_file_link(tp, file); |
290 | int n = trace_probe_nr_files(tp); | 252 | if (!link) { |
291 | int i, j; | ||
292 | |||
293 | old = rcu_dereference_raw(tp->files); | ||
294 | if (n == 0 || trace_probe_file_index(tp, file) < 0) { | ||
295 | ret = -EINVAL; | 253 | ret = -EINVAL; |
296 | goto out_unlock; | 254 | goto out; |
297 | } | 255 | } |
298 | 256 | ||
299 | if (n == 1) { /* Remove the last file */ | 257 | list_del_rcu(&link->list); |
300 | tp->flags &= ~TP_FLAG_TRACE; | 258 | wait = 1; |
301 | new = NULL; | 259 | if (!list_empty(&tp->files)) |
302 | } else { | 260 | goto out; |
303 | new = kzalloc(n * sizeof(struct ftrace_event_file *), | ||
304 | GFP_KERNEL); | ||
305 | if (!new) { | ||
306 | ret = -ENOMEM; | ||
307 | goto out_unlock; | ||
308 | } | ||
309 | |||
310 | /* This copy & check loop copies the NULL stopper too */ | ||
311 | for (i = 0, j = 0; j < n && i < n + 1; i++) | ||
312 | if (old[i] != file) | ||
313 | new[j++] = old[i]; | ||
314 | } | ||
315 | |||
316 | rcu_assign_pointer(tp->files, new); | ||
317 | 261 | ||
318 | /* Make sure the probe is done with old files */ | 262 | tp->flags &= ~TP_FLAG_TRACE; |
319 | synchronize_sched(); | ||
320 | kfree(old); | ||
321 | } else | 263 | } else |
322 | tp->flags &= ~TP_FLAG_PROFILE; | 264 | tp->flags &= ~TP_FLAG_PROFILE; |
323 | 265 | ||
@@ -326,10 +268,21 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
326 | disable_kretprobe(&tp->rp); | 268 | disable_kretprobe(&tp->rp); |
327 | else | 269 | else |
328 | disable_kprobe(&tp->rp.kp); | 270 | disable_kprobe(&tp->rp.kp); |
271 | wait = 1; | ||
272 | } | ||
273 | out: | ||
274 | if (wait) { | ||
275 | /* | ||
276 | * Synchronize with kprobe_trace_func/kretprobe_trace_func | ||
277 | * to ensure the probe is disabled (all running handlers have finished). | ||
278 | * This is not only for the kfree() below: the caller, | ||
279 | * trace_remove_event_call(), also relies on it before releasing | ||
280 | * event_call related objects, which would otherwise still be | ||
281 | * accessed by kprobe_trace_func/kretprobe_trace_func. | ||
282 | */ | ||
283 | synchronize_sched(); | ||
284 | kfree(link); /* Ignored if link == NULL */ | ||
329 | } | 285 | } |
330 | |||
331 | out_unlock: | ||
332 | mutex_unlock(&probe_enable_lock); | ||
333 | 286 | ||
334 | return ret; | 287 | return ret; |
335 | } | 288 | } |
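The rewrite above replaces the hand-rolled, NULL-terminated array of files with a plain RCU-protected list: enabling adds an event_file_link with list_add_tail_rcu(), disabling unlinks it with list_del_rcu(), and only after synchronize_sched() has guaranteed every in-flight probe handler is done does the link get kfree()d. The userspace sketch below mimics only that unlink-wait-free ordering; wait_for_readers() is a stub for what synchronize_sched() provides in the kernel, and the single-threaded list is not a real RCU implementation:

```c
#include <stdio.h>
#include <stdlib.h>

struct file_link {
	int id;
	struct file_link *next;
};

static struct file_link *files;		/* head of the "enabled files" list */

static void wait_for_readers(void)
{
	/* Kernel: synchronize_sched() - all running handlers have finished. */
}

static struct file_link *enable_file(int id)
{
	struct file_link *link = malloc(sizeof(*link));

	if (!link)
		return NULL;
	link->id = id;
	link->next = files;		/* publish: readers may now see it */
	files = link;
	return link;
}

static void disable_file(struct file_link *link)
{
	struct file_link **pp;

	for (pp = &files; *pp; pp = &(*pp)->next) {
		if (*pp == link) {
			*pp = link->next;	/* unlink first */
			wait_for_readers();	/* then wait out the handlers */
			free(link);		/* only now is freeing safe */
			return;
		}
	}
}

int main(void)
{
	struct file_link *a = enable_file(1);
	struct file_link *b = enable_file(2);

	disable_file(a);
	disable_file(b);
	printf("list empty: %d\n", files == NULL);
	return 0;
}
```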
@@ -398,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp) | |||
398 | if (trace_probe_is_enabled(tp)) | 351 | if (trace_probe_is_enabled(tp)) |
399 | return -EBUSY; | 352 | return -EBUSY; |
400 | 353 | ||
354 | /* Will fail if probe is being used by ftrace or perf */ | ||
355 | if (unregister_probe_event(tp)) | ||
356 | return -EBUSY; | ||
357 | |||
401 | __unregister_trace_probe(tp); | 358 | __unregister_trace_probe(tp); |
402 | list_del(&tp->list); | 359 | list_del(&tp->list); |
403 | unregister_probe_event(tp); | ||
404 | 360 | ||
405 | return 0; | 361 | return 0; |
406 | } | 362 | } |
@@ -679,7 +635,9 @@ static int release_all_trace_probes(void) | |||
679 | /* TODO: Use batch unregistration */ | 635 | /* TODO: Use batch unregistration */ |
680 | while (!list_empty(&probe_list)) { | 636 | while (!list_empty(&probe_list)) { |
681 | tp = list_entry(probe_list.next, struct trace_probe, list); | 637 | tp = list_entry(probe_list.next, struct trace_probe, list); |
682 | unregister_trace_probe(tp); | 638 | ret = unregister_trace_probe(tp); |
639 | if (ret) | ||
640 | goto end; | ||
683 | free_trace_probe(tp); | 641 | free_trace_probe(tp); |
684 | } | 642 | } |
685 | 643 | ||
@@ -885,20 +843,10 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | |||
885 | static __kprobes void | 843 | static __kprobes void |
886 | kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) | 844 | kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) |
887 | { | 845 | { |
888 | /* | 846 | struct event_file_link *link; |
889 | * Note: preempt is already disabled around the kprobe handler. | ||
890 | * However, we still need an smp_read_barrier_depends() corresponding | ||
891 | * to smp_wmb() in rcu_assign_pointer() to access the pointer. | ||
892 | */ | ||
893 | struct ftrace_event_file **file = rcu_dereference_raw(tp->files); | ||
894 | |||
895 | if (unlikely(!file)) | ||
896 | return; | ||
897 | 847 | ||
898 | while (*file) { | 848 | list_for_each_entry_rcu(link, &tp->files, list) |
899 | __kprobe_trace_func(tp, regs, *file); | 849 | __kprobe_trace_func(tp, regs, link->file); |
900 | file++; | ||
901 | } | ||
902 | } | 850 | } |
903 | 851 | ||
904 | /* Kretprobe handler */ | 852 | /* Kretprobe handler */ |
@@ -945,20 +893,10 @@ static __kprobes void | |||
945 | kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | 893 | kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, |
946 | struct pt_regs *regs) | 894 | struct pt_regs *regs) |
947 | { | 895 | { |
948 | /* | 896 | struct event_file_link *link; |
949 | * Note: preempt is already disabled around the kprobe handler. | ||
950 | * However, we still need an smp_read_barrier_depends() corresponding | ||
951 | * to smp_wmb() in rcu_assign_pointer() to access the pointer. | ||
952 | */ | ||
953 | struct ftrace_event_file **file = rcu_dereference_raw(tp->files); | ||
954 | |||
955 | if (unlikely(!file)) | ||
956 | return; | ||
957 | 897 | ||
958 | while (*file) { | 898 | list_for_each_entry_rcu(link, &tp->files, list) |
959 | __kretprobe_trace_func(tp, ri, regs, *file); | 899 | __kretprobe_trace_func(tp, ri, regs, link->file); |
960 | file++; | ||
961 | } | ||
962 | } | 900 | } |
963 | 901 | ||
964 | /* Event entry printers */ | 902 | /* Event entry printers */ |
@@ -1157,13 +1095,14 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | |||
1157 | int size, __size, dsize; | 1095 | int size, __size, dsize; |
1158 | int rctx; | 1096 | int rctx; |
1159 | 1097 | ||
1098 | head = this_cpu_ptr(call->perf_events); | ||
1099 | if (hlist_empty(head)) | ||
1100 | return; | ||
1101 | |||
1160 | dsize = __get_data_size(tp, regs); | 1102 | dsize = __get_data_size(tp, regs); |
1161 | __size = sizeof(*entry) + tp->size + dsize; | 1103 | __size = sizeof(*entry) + tp->size + dsize; |
1162 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1104 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1163 | size -= sizeof(u32); | 1105 | size -= sizeof(u32); |
1164 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
1165 | "profile buffer not large enough")) | ||
1166 | return; | ||
1167 | 1106 | ||
1168 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); | 1107 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); |
1169 | if (!entry) | 1108 | if (!entry) |
@@ -1172,10 +1111,7 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | |||
1172 | entry->ip = (unsigned long)tp->rp.kp.addr; | 1111 | entry->ip = (unsigned long)tp->rp.kp.addr; |
1173 | memset(&entry[1], 0, dsize); | 1112 | memset(&entry[1], 0, dsize); |
1174 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1113 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1175 | 1114 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | |
1176 | head = this_cpu_ptr(call->perf_events); | ||
1177 | perf_trace_buf_submit(entry, size, rctx, | ||
1178 | entry->ip, 1, regs, head, NULL); | ||
1179 | } | 1115 | } |
1180 | 1116 | ||
1181 | /* Kretprobe profile handler */ | 1117 | /* Kretprobe profile handler */ |
@@ -1189,13 +1125,14 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
1189 | int size, __size, dsize; | 1125 | int size, __size, dsize; |
1190 | int rctx; | 1126 | int rctx; |
1191 | 1127 | ||
1128 | head = this_cpu_ptr(call->perf_events); | ||
1129 | if (hlist_empty(head)) | ||
1130 | return; | ||
1131 | |||
1192 | dsize = __get_data_size(tp, regs); | 1132 | dsize = __get_data_size(tp, regs); |
1193 | __size = sizeof(*entry) + tp->size + dsize; | 1133 | __size = sizeof(*entry) + tp->size + dsize; |
1194 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1134 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1195 | size -= sizeof(u32); | 1135 | size -= sizeof(u32); |
1196 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
1197 | "profile buffer not large enough")) | ||
1198 | return; | ||
1199 | 1136 | ||
1200 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); | 1137 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); |
1201 | if (!entry) | 1138 | if (!entry) |
@@ -1204,13 +1141,16 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
1204 | entry->func = (unsigned long)tp->rp.kp.addr; | 1141 | entry->func = (unsigned long)tp->rp.kp.addr; |
1205 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1142 | entry->ret_ip = (unsigned long)ri->ret_addr; |
1206 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1143 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1207 | 1144 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | |
1208 | head = this_cpu_ptr(call->perf_events); | ||
1209 | perf_trace_buf_submit(entry, size, rctx, | ||
1210 | entry->ret_ip, 1, regs, head, NULL); | ||
1211 | } | 1145 | } |
1212 | #endif /* CONFIG_PERF_EVENTS */ | 1146 | #endif /* CONFIG_PERF_EVENTS */ |
1213 | 1147 | ||
1148 | /* | ||
1149 | * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex. | ||
1150 | * | ||
1151 | * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe | ||
1152 | * lockless, but we can't race with this __init function. | ||
1153 | */ | ||
1214 | static __kprobes | 1154 | static __kprobes |
1215 | int kprobe_register(struct ftrace_event_call *event, | 1155 | int kprobe_register(struct ftrace_event_call *event, |
1216 | enum trace_reg type, void *data) | 1156 | enum trace_reg type, void *data) |
@@ -1312,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp) | |||
1312 | return ret; | 1252 | return ret; |
1313 | } | 1253 | } |
1314 | 1254 | ||
1315 | static void unregister_probe_event(struct trace_probe *tp) | 1255 | static int unregister_probe_event(struct trace_probe *tp) |
1316 | { | 1256 | { |
1257 | int ret; | ||
1258 | |||
1317 | /* tp->event is unregistered in trace_remove_event_call() */ | 1259 | /* tp->event is unregistered in trace_remove_event_call() */ |
1318 | trace_remove_event_call(&tp->call); | 1260 | ret = trace_remove_event_call(&tp->call); |
1319 | kfree(tp->call.print_fmt); | 1261 | if (!ret) |
1262 | kfree(tp->call.print_fmt); | ||
1263 | return ret; | ||
1320 | } | 1264 | } |
1321 | 1265 | ||
1322 | /* Make a debugfs interface for controlling probe points */ | 1266 | /* Make a debugfs interface for controlling probe points */ |
@@ -1376,6 +1320,10 @@ find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr) | |||
1376 | return NULL; | 1320 | return NULL; |
1377 | } | 1321 | } |
1378 | 1322 | ||
1323 | /* | ||
1324 | * Nobody but us can call enable_trace_probe/disable_trace_probe at this | ||
1325 | * stage, so we can do this lockless. | ||
1326 | */ | ||
1379 | static __init int kprobe_trace_self_tests_init(void) | 1327 | static __init int kprobe_trace_self_tests_init(void) |
1380 | { | 1328 | { |
1381 | int ret, warn = 0; | 1329 | int ret, warn = 0; |
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index a5e8f4878bfa..b3dcfb2f0fef 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -90,7 +90,7 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) | |||
90 | if (drv) | 90 | if (drv) |
91 | ret += trace_seq_printf(s, " %s\n", drv->name); | 91 | ret += trace_seq_printf(s, " %s\n", drv->name); |
92 | else | 92 | else |
93 | ret += trace_seq_printf(s, " \n"); | 93 | ret += trace_seq_puts(s, " \n"); |
94 | return ret; | 94 | return ret; |
95 | } | 95 | } |
96 | 96 | ||
@@ -107,7 +107,7 @@ static void mmio_pipe_open(struct trace_iterator *iter) | |||
107 | struct header_iter *hiter; | 107 | struct header_iter *hiter; |
108 | struct trace_seq *s = &iter->seq; | 108 | struct trace_seq *s = &iter->seq; |
109 | 109 | ||
110 | trace_seq_printf(s, "VERSION 20070824\n"); | 110 | trace_seq_puts(s, "VERSION 20070824\n"); |
111 | 111 | ||
112 | hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); | 112 | hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); |
113 | if (!hiter) | 113 | if (!hiter) |
@@ -209,7 +209,7 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
209 | (rw->value >> 0) & 0xff, rw->pc, 0); | 209 | (rw->value >> 0) & 0xff, rw->pc, 0); |
210 | break; | 210 | break; |
211 | default: | 211 | default: |
212 | ret = trace_seq_printf(s, "rw what?\n"); | 212 | ret = trace_seq_puts(s, "rw what?\n"); |
213 | break; | 213 | break; |
214 | } | 214 | } |
215 | if (ret) | 215 | if (ret) |
@@ -245,7 +245,7 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) | |||
245 | secs, usec_rem, m->map_id, 0UL, 0); | 245 | secs, usec_rem, m->map_id, 0UL, 0); |
246 | break; | 246 | break; |
247 | default: | 247 | default: |
248 | ret = trace_seq_printf(s, "map what?\n"); | 248 | ret = trace_seq_puts(s, "map what?\n"); |
249 | break; | 249 | break; |
250 | } | 250 | } |
251 | if (ret) | 251 | if (ret) |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index bb922d9ee51b..34e7cbac0c9c 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -78,7 +78,7 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | |||
78 | 78 | ||
79 | trace_assign_type(field, entry); | 79 | trace_assign_type(field, entry); |
80 | 80 | ||
81 | ret = trace_seq_printf(s, "%s", field->buf); | 81 | ret = trace_seq_puts(s, field->buf); |
82 | if (!ret) | 82 | if (!ret) |
83 | return TRACE_TYPE_PARTIAL_LINE; | 83 | return TRACE_TYPE_PARTIAL_LINE; |
84 | 84 | ||
@@ -558,14 +558,14 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | |||
558 | if (ret) | 558 | if (ret) |
559 | ret = trace_seq_puts(s, "??"); | 559 | ret = trace_seq_puts(s, "??"); |
560 | if (ret) | 560 | if (ret) |
561 | ret = trace_seq_puts(s, "\n"); | 561 | ret = trace_seq_putc(s, '\n'); |
562 | continue; | 562 | continue; |
563 | } | 563 | } |
564 | if (!ret) | 564 | if (!ret) |
565 | break; | 565 | break; |
566 | if (ret) | 566 | if (ret) |
567 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | 567 | ret = seq_print_user_ip(s, mm, ip, sym_flags); |
568 | ret = trace_seq_puts(s, "\n"); | 568 | ret = trace_seq_putc(s, '\n'); |
569 | } | 569 | } |
570 | 570 | ||
571 | if (mm) | 571 | if (mm) |
@@ -579,7 +579,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | |||
579 | int ret; | 579 | int ret; |
580 | 580 | ||
581 | if (!ip) | 581 | if (!ip) |
582 | return trace_seq_printf(s, "0"); | 582 | return trace_seq_putc(s, '0'); |
583 | 583 | ||
584 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | 584 | if (sym_flags & TRACE_ITER_SYM_OFFSET) |
585 | ret = seq_print_sym_offset(s, "%s", ip); | 585 | ret = seq_print_sym_offset(s, "%s", ip); |
@@ -964,14 +964,14 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags, | |||
964 | goto partial; | 964 | goto partial; |
965 | 965 | ||
966 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { | 966 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { |
967 | if (!trace_seq_printf(s, " <-")) | 967 | if (!trace_seq_puts(s, " <-")) |
968 | goto partial; | 968 | goto partial; |
969 | if (!seq_print_ip_sym(s, | 969 | if (!seq_print_ip_sym(s, |
970 | field->parent_ip, | 970 | field->parent_ip, |
971 | flags)) | 971 | flags)) |
972 | goto partial; | 972 | goto partial; |
973 | } | 973 | } |
974 | if (!trace_seq_printf(s, "\n")) | 974 | if (!trace_seq_putc(s, '\n')) |
975 | goto partial; | 975 | goto partial; |
976 | 976 | ||
977 | return TRACE_TYPE_HANDLED; | 977 | return TRACE_TYPE_HANDLED; |
@@ -1210,7 +1210,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, | |||
1210 | 1210 | ||
1211 | if (!seq_print_ip_sym(s, *p, flags)) | 1211 | if (!seq_print_ip_sym(s, *p, flags)) |
1212 | goto partial; | 1212 | goto partial; |
1213 | if (!trace_seq_puts(s, "\n")) | 1213 | if (!trace_seq_putc(s, '\n')) |
1214 | goto partial; | 1214 | goto partial; |
1215 | } | 1215 | } |
1216 | 1216 | ||
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 2901e3b88590..a7329b7902f8 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -640,13 +640,20 @@ out: | |||
640 | * Enable ftrace, sleep 1/10 second, and then read the trace | 640 | * Enable ftrace, sleep 1/10 second, and then read the trace |
641 | * buffer to see if all is in order. | 641 | * buffer to see if all is in order. |
642 | */ | 642 | */ |
643 | int | 643 | __init int |
644 | trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | 644 | trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) |
645 | { | 645 | { |
646 | int save_ftrace_enabled = ftrace_enabled; | 646 | int save_ftrace_enabled = ftrace_enabled; |
647 | unsigned long count; | 647 | unsigned long count; |
648 | int ret; | 648 | int ret; |
649 | 649 | ||
650 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
651 | if (ftrace_filter_param) { | ||
652 | printk(KERN_CONT " ... kernel command line filter set: force PASS ... "); | ||
653 | return 0; | ||
654 | } | ||
655 | #endif | ||
656 | |||
650 | /* make sure msleep has been recorded */ | 657 | /* make sure msleep has been recorded */ |
651 | msleep(1); | 658 | msleep(1); |
652 | 659 | ||
@@ -727,13 +734,20 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) | |||
727 | * Pretty much the same as for the function tracer from which the selftest | 734 | * Pretty much the same as for the function tracer from which the selftest |
728 | * has been borrowed. | 735 | * has been borrowed. |
729 | */ | 736 | */ |
730 | int | 737 | __init int |
731 | trace_selftest_startup_function_graph(struct tracer *trace, | 738 | trace_selftest_startup_function_graph(struct tracer *trace, |
732 | struct trace_array *tr) | 739 | struct trace_array *tr) |
733 | { | 740 | { |
734 | int ret; | 741 | int ret; |
735 | unsigned long count; | 742 | unsigned long count; |
736 | 743 | ||
744 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
745 | if (ftrace_filter_param) { | ||
746 | printk(KERN_CONT " ... kernel command line filter set: force PASS ... "); | ||
747 | return 0; | ||
748 | } | ||
749 | #endif | ||
750 | |||
737 | /* | 751 | /* |
738 | * Simulate the init() callback but we attach a watchdog callback | 752 | * Simulate the init() callback but we attach a watchdog callback |
739 | * to detect and recover from possible hangs | 753 | * to detect and recover from possible hangs |
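Both selftests now bail out early when a function filter was given on the kernel command line: with ftrace_filter= in effect the traced window may not contain the functions the selftest samples, so the test is forced to PASS instead of reporting a bogus failure, and the functions are marked __init since they only run at boot. Roughly how the flag gets set, sketched under the assumption that the boot parameter is parsed with the usual __setup() machinery (the buffer size and helper name here are illustrative):

```c
#include <linux/init.h>
#include <linux/string.h>
#include <linux/types.h>

#define FTRACE_FILTER_BUF_SIZE	256	/* illustrative size */

/*
 * Set once ftrace_filter= appears on the command line; trace_selftest.c
 * only needs to know that *some* filter was requested at boot.
 */
bool ftrace_filter_param __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_BUF_SIZE] __initdata;

static int __init set_ftrace_filter_param(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_filter_buf, str, sizeof(ftrace_filter_buf));
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter_param);
```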
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 8f2ac73c7a5f..8fd03657bc7d 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -175,7 +175,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags, | |||
175 | entry = syscall_nr_to_meta(syscall); | 175 | entry = syscall_nr_to_meta(syscall); |
176 | 176 | ||
177 | if (!entry) { | 177 | if (!entry) { |
178 | trace_seq_printf(s, "\n"); | 178 | trace_seq_putc(s, '\n'); |
179 | return TRACE_TYPE_HANDLED; | 179 | return TRACE_TYPE_HANDLED; |
180 | } | 180 | } |
181 | 181 | ||
@@ -306,6 +306,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
306 | struct syscall_metadata *sys_data; | 306 | struct syscall_metadata *sys_data; |
307 | struct ring_buffer_event *event; | 307 | struct ring_buffer_event *event; |
308 | struct ring_buffer *buffer; | 308 | struct ring_buffer *buffer; |
309 | unsigned long irq_flags; | ||
310 | int pc; | ||
309 | int syscall_nr; | 311 | int syscall_nr; |
310 | int size; | 312 | int size; |
311 | 313 | ||
@@ -321,9 +323,12 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
321 | 323 | ||
322 | size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; | 324 | size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; |
323 | 325 | ||
326 | local_save_flags(irq_flags); | ||
327 | pc = preempt_count(); | ||
328 | |||
324 | buffer = tr->trace_buffer.buffer; | 329 | buffer = tr->trace_buffer.buffer; |
325 | event = trace_buffer_lock_reserve(buffer, | 330 | event = trace_buffer_lock_reserve(buffer, |
326 | sys_data->enter_event->event.type, size, 0, 0); | 331 | sys_data->enter_event->event.type, size, irq_flags, pc); |
327 | if (!event) | 332 | if (!event) |
328 | return; | 333 | return; |
329 | 334 | ||
@@ -333,7 +338,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
333 | 338 | ||
334 | if (!filter_current_check_discard(buffer, sys_data->enter_event, | 339 | if (!filter_current_check_discard(buffer, sys_data->enter_event, |
335 | entry, event)) | 340 | entry, event)) |
336 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); | 341 | trace_current_buffer_unlock_commit(buffer, event, |
342 | irq_flags, pc); | ||
337 | } | 343 | } |
338 | 344 | ||
339 | static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | 345 | static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) |
@@ -343,6 +349,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | |||
343 | struct syscall_metadata *sys_data; | 349 | struct syscall_metadata *sys_data; |
344 | struct ring_buffer_event *event; | 350 | struct ring_buffer_event *event; |
345 | struct ring_buffer *buffer; | 351 | struct ring_buffer *buffer; |
352 | unsigned long irq_flags; | ||
353 | int pc; | ||
346 | int syscall_nr; | 354 | int syscall_nr; |
347 | 355 | ||
348 | syscall_nr = trace_get_syscall_nr(current, regs); | 356 | syscall_nr = trace_get_syscall_nr(current, regs); |
@@ -355,9 +363,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | |||
355 | if (!sys_data) | 363 | if (!sys_data) |
356 | return; | 364 | return; |
357 | 365 | ||
366 | local_save_flags(irq_flags); | ||
367 | pc = preempt_count(); | ||
368 | |||
358 | buffer = tr->trace_buffer.buffer; | 369 | buffer = tr->trace_buffer.buffer; |
359 | event = trace_buffer_lock_reserve(buffer, | 370 | event = trace_buffer_lock_reserve(buffer, |
360 | sys_data->exit_event->event.type, sizeof(*entry), 0, 0); | 371 | sys_data->exit_event->event.type, sizeof(*entry), |
372 | irq_flags, pc); | ||
361 | if (!event) | 373 | if (!event) |
362 | return; | 374 | return; |
363 | 375 | ||
@@ -367,7 +379,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | |||
367 | 379 | ||
368 | if (!filter_current_check_discard(buffer, sys_data->exit_event, | 380 | if (!filter_current_check_discard(buffer, sys_data->exit_event, |
369 | entry, event)) | 381 | entry, event)) |
370 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); | 382 | trace_current_buffer_unlock_commit(buffer, event, |
383 | irq_flags, pc); | ||
371 | } | 384 | } |
372 | 385 | ||
373 | static int reg_event_syscall_enter(struct ftrace_event_file *file, | 386 | static int reg_event_syscall_enter(struct ftrace_event_file *file, |
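Until now the syscall enter/exit events were committed with hard-coded flags and preempt count of 0, so the irqs-off, need-resched and preempt-depth columns of the trace output were always blank for them. The two hunks above capture the real context at the time the event fires and hand it to trace_buffer_lock_reserve()/trace_current_buffer_unlock_commit(). A minimal sketch of the snapshot itself:

```c
#include <linux/irqflags.h>
#include <linux/preempt.h>

/*
 * Snapshot the tracing context the way the hunks above do; the two
 * values are then passed to trace_buffer_lock_reserve() instead of 0, 0.
 */
static void snapshot_trace_context(unsigned long *irq_flags, int *pc)
{
	unsigned long flags;

	local_save_flags(flags);	/* IRQ enable state right now */
	*irq_flags = flags;
	*pc = preempt_count();		/* preempt/softirq/hardirq depth */
}
```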
@@ -553,15 +566,15 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
553 | if (!sys_data) | 566 | if (!sys_data) |
554 | return; | 567 | return; |
555 | 568 | ||
569 | head = this_cpu_ptr(sys_data->enter_event->perf_events); | ||
570 | if (hlist_empty(head)) | ||
571 | return; | ||
572 | |||
556 | /* get the size after alignment with the u32 buffer size field */ | 573 | /* get the size after alignment with the u32 buffer size field */ |
557 | size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); | 574 | size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); |
558 | size = ALIGN(size + sizeof(u32), sizeof(u64)); | 575 | size = ALIGN(size + sizeof(u32), sizeof(u64)); |
559 | size -= sizeof(u32); | 576 | size -= sizeof(u32); |
560 | 577 | ||
561 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
562 | "perf buffer not large enough")) | ||
563 | return; | ||
564 | |||
565 | rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, | 578 | rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, |
566 | sys_data->enter_event->event.type, regs, &rctx); | 579 | sys_data->enter_event->event.type, regs, &rctx); |
567 | if (!rec) | 580 | if (!rec) |
@@ -570,8 +583,6 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
570 | rec->nr = syscall_nr; | 583 | rec->nr = syscall_nr; |
571 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 584 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, |
572 | (unsigned long *)&rec->args); | 585 | (unsigned long *)&rec->args); |
573 | |||
574 | head = this_cpu_ptr(sys_data->enter_event->perf_events); | ||
575 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); | 586 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); |
576 | } | 587 | } |
577 | 588 | ||
@@ -629,18 +640,14 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
629 | if (!sys_data) | 640 | if (!sys_data) |
630 | return; | 641 | return; |
631 | 642 | ||
643 | head = this_cpu_ptr(sys_data->exit_event->perf_events); | ||
644 | if (hlist_empty(head)) | ||
645 | return; | ||
646 | |||
632 | /* We can probably do that at build time */ | 647 | /* We can probably do that at build time */ |
633 | size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); | 648 | size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); |
634 | size -= sizeof(u32); | 649 | size -= sizeof(u32); |
635 | 650 | ||
636 | /* | ||
637 | * Impossible, but be paranoid with the future | ||
638 | * How to put this check outside runtime? | ||
639 | */ | ||
640 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
641 | "exit event has grown above perf buffer size")) | ||
642 | return; | ||
643 | |||
644 | rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, | 651 | rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, |
645 | sys_data->exit_event->event.type, regs, &rctx); | 652 | sys_data->exit_event->event.type, regs, &rctx); |
646 | if (!rec) | 653 | if (!rec) |
@@ -648,8 +655,6 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
648 | 655 | ||
649 | rec->nr = syscall_nr; | 656 | rec->nr = syscall_nr; |
650 | rec->ret = syscall_get_return_value(current, regs); | 657 | rec->ret = syscall_get_return_value(current, regs); |
651 | |||
652 | head = this_cpu_ptr(sys_data->exit_event->perf_events); | ||
653 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); | 658 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); |
654 | } | 659 | } |
655 | 660 | ||
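The perf probes for syscall enter/exit change in two ways. First, the per-CPU hlist of attached perf events is checked up front: if nothing is listening on this CPU there is no point computing sizes or preparing a buffer, so the handler returns immediately (the list was previously fetched only right before submission). Second, the open-coded WARN_ONCE() against PERF_MAX_TRACE_SIZE disappears, presumably because that check is centralized in perf_trace_buf_prepare() elsewhere in this series; that is an inference, not something visible in this hunk. A sketch of the early-exit pattern, with the surrounding structures reduced to the one field that matters:

```c
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/types.h>

/*
 * Illustrative helper: bail out of the probe early when no perf event
 * is attached to this syscall on the current CPU.
 */
static bool syscall_has_perf_listener(struct hlist_head __percpu *perf_events)
{
	return !hlist_empty(this_cpu_ptr(perf_events));
}
```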
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 32494fb0ee64..272261b5f94f 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -70,7 +70,7 @@ struct trace_uprobe { | |||
70 | (sizeof(struct probe_arg) * (n))) | 70 | (sizeof(struct probe_arg) * (n))) |
71 | 71 | ||
72 | static int register_uprobe_event(struct trace_uprobe *tu); | 72 | static int register_uprobe_event(struct trace_uprobe *tu); |
73 | static void unregister_uprobe_event(struct trace_uprobe *tu); | 73 | static int unregister_uprobe_event(struct trace_uprobe *tu); |
74 | 74 | ||
75 | static DEFINE_MUTEX(uprobe_lock); | 75 | static DEFINE_MUTEX(uprobe_lock); |
76 | static LIST_HEAD(uprobe_list); | 76 | static LIST_HEAD(uprobe_list); |
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou | |||
164 | } | 164 | } |
165 | 165 | ||
166 | /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ | 166 | /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ |
167 | static void unregister_trace_uprobe(struct trace_uprobe *tu) | 167 | static int unregister_trace_uprobe(struct trace_uprobe *tu) |
168 | { | 168 | { |
169 | int ret; | ||
170 | |||
171 | ret = unregister_uprobe_event(tu); | ||
172 | if (ret) | ||
173 | return ret; | ||
174 | |||
169 | list_del(&tu->list); | 175 | list_del(&tu->list); |
170 | unregister_uprobe_event(tu); | ||
171 | free_trace_uprobe(tu); | 176 | free_trace_uprobe(tu); |
177 | return 0; | ||
172 | } | 178 | } |
173 | 179 | ||
174 | /* Register a trace_uprobe and probe_event */ | 180 | /* Register a trace_uprobe and probe_event */ |
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu) | |||
181 | 187 | ||
182 | /* register as an event */ | 188 | /* register as an event */ |
183 | old_tp = find_probe_event(tu->call.name, tu->call.class->system); | 189 | old_tp = find_probe_event(tu->call.name, tu->call.class->system); |
184 | if (old_tp) | 190 | if (old_tp) { |
185 | /* delete old event */ | 191 | /* delete old event */ |
186 | unregister_trace_uprobe(old_tp); | 192 | ret = unregister_trace_uprobe(old_tp); |
193 | if (ret) | ||
194 | goto end; | ||
195 | } | ||
187 | 196 | ||
188 | ret = register_uprobe_event(tu); | 197 | ret = register_uprobe_event(tu); |
189 | if (ret) { | 198 | if (ret) { |
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv) | |||
256 | group = UPROBE_EVENT_SYSTEM; | 265 | group = UPROBE_EVENT_SYSTEM; |
257 | 266 | ||
258 | if (is_delete) { | 267 | if (is_delete) { |
268 | int ret; | ||
269 | |||
259 | if (!event) { | 270 | if (!event) { |
260 | pr_info("Delete command needs an event name.\n"); | 271 | pr_info("Delete command needs an event name.\n"); |
261 | return -EINVAL; | 272 | return -EINVAL; |
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv) | |||
269 | return -ENOENT; | 280 | return -ENOENT; |
270 | } | 281 | } |
271 | /* delete an event */ | 282 | /* delete an event */ |
272 | unregister_trace_uprobe(tu); | 283 | ret = unregister_trace_uprobe(tu); |
273 | mutex_unlock(&uprobe_lock); | 284 | mutex_unlock(&uprobe_lock); |
274 | return 0; | 285 | return ret; |
275 | } | 286 | } |
276 | 287 | ||
277 | if (argc < 2) { | 288 | if (argc < 2) { |
@@ -283,8 +294,10 @@ static int create_trace_uprobe(int argc, char **argv) | |||
283 | return -EINVAL; | 294 | return -EINVAL; |
284 | } | 295 | } |
285 | arg = strchr(argv[1], ':'); | 296 | arg = strchr(argv[1], ':'); |
286 | if (!arg) | 297 | if (!arg) { |
298 | ret = -EINVAL; | ||
287 | goto fail_address_parse; | 299 | goto fail_address_parse; |
300 | } | ||
288 | 301 | ||
289 | *arg++ = '\0'; | 302 | *arg++ = '\0'; |
290 | filename = argv[1]; | 303 | filename = argv[1]; |
@@ -406,16 +419,20 @@ fail_address_parse: | |||
406 | return ret; | 419 | return ret; |
407 | } | 420 | } |
408 | 421 | ||
409 | static void cleanup_all_probes(void) | 422 | static int cleanup_all_probes(void) |
410 | { | 423 | { |
411 | struct trace_uprobe *tu; | 424 | struct trace_uprobe *tu; |
425 | int ret = 0; | ||
412 | 426 | ||
413 | mutex_lock(&uprobe_lock); | 427 | mutex_lock(&uprobe_lock); |
414 | while (!list_empty(&uprobe_list)) { | 428 | while (!list_empty(&uprobe_list)) { |
415 | tu = list_entry(uprobe_list.next, struct trace_uprobe, list); | 429 | tu = list_entry(uprobe_list.next, struct trace_uprobe, list); |
416 | unregister_trace_uprobe(tu); | 430 | ret = unregister_trace_uprobe(tu); |
431 | if (ret) | ||
432 | break; | ||
417 | } | 433 | } |
418 | mutex_unlock(&uprobe_lock); | 434 | mutex_unlock(&uprobe_lock); |
435 | return ret; | ||
419 | } | 436 | } |
420 | 437 | ||
421 | /* Probes listing interfaces */ | 438 | /* Probes listing interfaces */ |
@@ -460,8 +477,13 @@ static const struct seq_operations probes_seq_op = { | |||
460 | 477 | ||
461 | static int probes_open(struct inode *inode, struct file *file) | 478 | static int probes_open(struct inode *inode, struct file *file) |
462 | { | 479 | { |
463 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) | 480 | int ret; |
464 | cleanup_all_probes(); | 481 | |
482 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { | ||
483 | ret = cleanup_all_probes(); | ||
484 | if (ret) | ||
485 | return ret; | ||
486 | } | ||
465 | 487 | ||
466 | return seq_open(file, &probes_seq_op); | 488 | return seq_open(file, &probes_seq_op); |
467 | } | 489 | } |
@@ -816,8 +838,6 @@ static void uprobe_perf_print(struct trace_uprobe *tu, | |||
816 | 838 | ||
817 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | 839 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
818 | size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); | 840 | size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); |
819 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) | ||
820 | return; | ||
821 | 841 | ||
822 | preempt_disable(); | 842 | preempt_disable(); |
823 | head = this_cpu_ptr(call->perf_events); | 843 | head = this_cpu_ptr(call->perf_events); |
@@ -968,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu) | |||
968 | return ret; | 988 | return ret; |
969 | } | 989 | } |
970 | 990 | ||
971 | static void unregister_uprobe_event(struct trace_uprobe *tu) | 991 | static int unregister_uprobe_event(struct trace_uprobe *tu) |
972 | { | 992 | { |
993 | int ret; | ||
994 | |||
973 | /* tu->event is unregistered in trace_remove_event_call() */ | 995 | /* tu->event is unregistered in trace_remove_event_call() */ |
974 | trace_remove_event_call(&tu->call); | 996 | ret = trace_remove_event_call(&tu->call); |
997 | if (ret) | ||
998 | return ret; | ||
975 | kfree(tu->call.print_fmt); | 999 | kfree(tu->call.print_fmt); |
976 | tu->call.print_fmt = NULL; | 1000 | tu->call.print_fmt = NULL; |
1001 | return 0; | ||
977 | } | 1002 | } |
978 | 1003 | ||
979 | /* Make a trace interface for controlling probe points */ | 1004 | /* Make a trace interface for controlling probe points */ |
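Throughout trace_uprobe.c the unregister path gains a return value: trace_remove_event_call() can now refuse to remove an event, typically because it is still enabled or has users, and that failure is propagated up through unregister_uprobe_event(), unregister_trace_uprobe(), cleanup_all_probes() and probes_open(), so opening uprobe_events with O_TRUNC no longer silently frees probes that are still in use. The removal of the WARN_ONCE size check in uprobe_perf_print() mirrors the trace_syscalls.c change above. A hedged sketch of the caller-side pattern (the -EBUSY failure mode and the helper below are assumptions for illustration, not the literal in-tree code):

```c
#include <linux/errno.h>
#include <linux/printk.h>

struct trace_uprobe;	/* opaque here; defined in trace_uprobe.c */

/*
 * Illustrative caller: deletion can now fail, so check the result
 * instead of assuming the probe is gone and its memory freed.
 */
static int try_delete_probe(struct trace_uprobe *tu,
			    int (*unregister)(struct trace_uprobe *))
{
	int ret = unregister(tu);

	if (ret)	/* e.g. -EBUSY: event still in use, probe kept */
		pr_info("uprobe event busy, not deleted (%d)\n", ret);
	return ret;
}
```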