author     Linus Torvalds <torvalds@linux-foundation.org>  2017-02-27 16:26:17 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-27 16:26:17 -0500
commit     79b17ea740d9fab178d6a1aa15d848b5e6c01b82 (patch)
tree       b0c18df8713999e16bcc6e5a32cbef880efb3b10 /kernel
parent     e5d56efc97f8240d0b5d66c03949382b6d7e5570 (diff)
parent     67d04bb2bcbd3e99f4c4daa58599c90a83ad314a (diff)
Merge tag 'trace-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"This release has no new tracing features, just clean ups, minor fixes
and small optimizations"
* tag 'trace-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (25 commits)
tracing: Remove outdated ring buffer comment
tracing/probes: Fix a warning message to show correct maximum length
tracing: Fix return value check in trace_benchmark_reg()
tracing: Use modern function declaration
jump_label: Reduce the size of struct static_key
tracing/probe: Show subsystem name in messages
tracing/hwlat: Update old comment about migration
timers: Make flags output in the timer_start tracepoint useful
tracing: Have traceprobe_probes_write() not access userspace unnecessarily
tracing: Have COMM event filter key be treated as a string
ftrace: Have set_graph_function handle multiple functions in one write
ftrace: Do not hold references of ftrace_graph_{notrace_}hash out of graph_lock
tracing: Reset parser->buffer to allow multiple "puts"
ftrace: Have set_graph_functions handle write with RDWR
ftrace: Reset fgd->hash in ftrace_graph_write()
ftrace: Replace (void *)1 with a meaningful macro name FTRACE_GRAPH_EMPTY
ftrace: Create a slight optimization on searching the ftrace_hash
tracing: Add ftrace_hash_key() helper function
ftrace: Convert graph filter to use hash tables
ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip
...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/jump_label.c             153
-rw-r--r--  kernel/trace/ftrace.c           372
-rw-r--r--  kernel/trace/trace.c             13
-rw-r--r--  kernel/trace/trace.h             83
-rw-r--r--  kernel/trace/trace_benchmark.c    4
-rw-r--r--  kernel/trace/trace_branch.c      83
-rw-r--r--  kernel/trace/trace_entries.h      6
-rw-r--r--  kernel/trace/trace_hwlat.c        5
-rw-r--r--  kernel/trace/trace_kprobe.c       1
-rw-r--r--  kernel/trace/trace_probe.c       50
-rw-r--r--  kernel/trace/trace_uprobe.c       4
11 files changed, 536 insertions(+), 238 deletions(-)
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index a9b8cf500591..6c9cb208ac48 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -236,12 +236,28 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
 
 static inline struct jump_entry *static_key_entries(struct static_key *key)
 {
-    return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
+    WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
+    return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
 }
 
 static inline bool static_key_type(struct static_key *key)
 {
-    return (unsigned long)key->entries & JUMP_TYPE_MASK;
+    return key->type & JUMP_TYPE_TRUE;
+}
+
+static inline bool static_key_linked(struct static_key *key)
+{
+    return key->type & JUMP_TYPE_LINKED;
+}
+
+static inline void static_key_clear_linked(struct static_key *key)
+{
+    key->type &= ~JUMP_TYPE_LINKED;
+}
+
+static inline void static_key_set_linked(struct static_key *key)
+{
+    key->type |= JUMP_TYPE_LINKED;
 }
 
 static inline struct static_key *jump_entry_key(struct jump_entry *entry)
@@ -254,6 +270,26 @@ static bool jump_entry_branch(struct jump_entry *entry)
     return (unsigned long)entry->key & 1UL;
 }
 
+/***
+ * A 'struct static_key' uses a union such that it either points directly
+ * to a table of 'struct jump_entry' or to a linked list of modules which in
+ * turn point to 'struct jump_entry' tables.
+ *
+ * The two lower bits of the pointer are used to keep track of which pointer
+ * type is in use and to store the initial branch direction, we use an access
+ * function which preserves these bits.
+ */
+static void static_key_set_entries(struct static_key *key,
+                                   struct jump_entry *entries)
+{
+    unsigned long type;
+
+    WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
+    type = key->type & JUMP_TYPE_MASK;
+    key->entries = entries;
+    key->type |= type;
+}
+
 static enum jump_label_type jump_label_type(struct jump_entry *entry)
 {
     struct static_key *key = jump_entry_key(entry);
@@ -313,13 +349,7 @@ void __init jump_label_init(void)
             continue;
 
         key = iterk;
-        /*
-         * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
-         */
-        *((unsigned long *)&key->entries) += (unsigned long)iter;
-#ifdef CONFIG_MODULES
-        key->next = NULL;
-#endif
+        static_key_set_entries(key, iter);
     }
     static_key_initialized = true;
     jump_label_unlock();
@@ -343,6 +373,29 @@ struct static_key_mod {
     struct module *mod;
 };
 
+static inline struct static_key_mod *static_key_mod(struct static_key *key)
+{
+    WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
+    return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
+}
+
+/***
+ * key->type and key->next are the same via union.
+ * This sets key->next and preserves the type bits.
+ *
+ * See additional comments above static_key_set_entries().
+ */
+static void static_key_set_mod(struct static_key *key,
+                               struct static_key_mod *mod)
+{
+    unsigned long type;
+
+    WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
+    type = key->type & JUMP_TYPE_MASK;
+    key->next = mod;
+    key->type |= type;
+}
+
 static int __jump_label_mod_text_reserved(void *start, void *end)
 {
     struct module *mod;
@@ -365,11 +418,23 @@ static void __jump_label_mod_update(struct static_key *key)
 {
     struct static_key_mod *mod;
 
-    for (mod = key->next; mod; mod = mod->next) {
-        struct module *m = mod->mod;
+    for (mod = static_key_mod(key); mod; mod = mod->next) {
+        struct jump_entry *stop;
+        struct module *m;
+
+        /*
+         * NULL if the static_key is defined in a module
+         * that does not use it
+         */
+        if (!mod->entries)
+            continue;
 
-        __jump_label_update(key, mod->entries,
-                            m->jump_entries + m->num_jump_entries);
+        m = mod->mod;
+        if (!m)
+            stop = __stop___jump_table;
+        else
+            stop = m->jump_entries + m->num_jump_entries;
+        __jump_label_update(key, mod->entries, stop);
     }
 }
 
@@ -404,7 +469,7 @@ static int jump_label_add_module(struct module *mod)
     struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
     struct jump_entry *iter;
     struct static_key *key = NULL;
-    struct static_key_mod *jlm;
+    struct static_key_mod *jlm, *jlm2;
 
     /* if the module doesn't have jump label entries, just return */
     if (iter_start == iter_stop)
@@ -421,20 +486,32 @@ static int jump_label_add_module(struct module *mod)
 
         key = iterk;
         if (within_module(iter->key, mod)) {
-            /*
-             * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
-             */
-            *((unsigned long *)&key->entries) += (unsigned long)iter;
-            key->next = NULL;
+            static_key_set_entries(key, iter);
             continue;
         }
         jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
         if (!jlm)
             return -ENOMEM;
+        if (!static_key_linked(key)) {
+            jlm2 = kzalloc(sizeof(struct static_key_mod),
+                           GFP_KERNEL);
+            if (!jlm2) {
+                kfree(jlm);
+                return -ENOMEM;
+            }
+            preempt_disable();
+            jlm2->mod = __module_address((unsigned long)key);
+            preempt_enable();
+            jlm2->entries = static_key_entries(key);
+            jlm2->next = NULL;
+            static_key_set_mod(key, jlm2);
+            static_key_set_linked(key);
+        }
         jlm->mod = mod;
         jlm->entries = iter;
-        jlm->next = key->next;
-        key->next = jlm;
+        jlm->next = static_key_mod(key);
+        static_key_set_mod(key, jlm);
+        static_key_set_linked(key);
 
         /* Only update if we've changed from our initial state */
         if (jump_label_type(iter) != jump_label_init_type(iter))
@@ -461,16 +538,34 @@ static void jump_label_del_module(struct module *mod)
         if (within_module(iter->key, mod))
             continue;
 
+        /* No memory during module load */
+        if (WARN_ON(!static_key_linked(key)))
+            continue;
+
         prev = &key->next;
-        jlm = key->next;
+        jlm = static_key_mod(key);
 
         while (jlm && jlm->mod != mod) {
             prev = &jlm->next;
             jlm = jlm->next;
         }
 
-        if (jlm) {
+        /* No memory during module load */
+        if (WARN_ON(!jlm))
+            continue;
+
+        if (prev == &key->next)
+            static_key_set_mod(key, jlm->next);
+        else
             *prev = jlm->next;
+
+        kfree(jlm);
+
+        jlm = static_key_mod(key);
+        /* if only one entry is left, fold it back into the static_key */
+        if (jlm->next == NULL) {
+            static_key_set_entries(key, jlm->entries);
+            static_key_clear_linked(key);
             kfree(jlm);
         }
     }
@@ -499,8 +594,10 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
     case MODULE_STATE_COMING:
         jump_label_lock();
         ret = jump_label_add_module(mod);
-        if (ret)
+        if (ret) {
+            WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
             jump_label_del_module(mod);
+        }
         jump_label_unlock();
         break;
     case MODULE_STATE_GOING:
@@ -561,11 +658,14 @@ int jump_label_text_reserved(void *start, void *end)
 static void jump_label_update(struct static_key *key)
 {
     struct jump_entry *stop = __stop___jump_table;
-    struct jump_entry *entry = static_key_entries(key);
+    struct jump_entry *entry;
 #ifdef CONFIG_MODULES
     struct module *mod;
 
-    __jump_label_mod_update(key);
+    if (static_key_linked(key)) {
+        __jump_label_mod_update(key);
+        return;
+    }
 
     preempt_disable();
     mod = __module_address((unsigned long)key);
@@ -573,6 +673,7 @@ static void jump_label_update(struct static_key *key)
         stop = mod->jump_entries + mod->num_jump_entries;
     preempt_enable();
 #endif
+    entry = static_key_entries(key);
     /* if there are no users, entry can be NULL */
     if (entry)
         __jump_label_update(key, entry, stop);
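The jump_label change above leans on pointer tagging: because jump_entry tables and static_key_mod nodes are at least word-aligned, the two low bits of the overlaid pointer are free to carry the initial-branch-direction and "linked" flags, and every setter must mask the tags out, store the pointer, and restore them. A minimal userspace sketch of the same technique follows; it is an illustration under that alignment assumption, not kernel code, and all names in it are invented:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_TRUE   0x1UL  /* stands in for the initial branch direction */
#define TAG_LINKED 0x2UL  /* payload is a list of modules, not a table */
#define TAG_MASK   0x3UL

struct tagged_ptr { uintptr_t val; };

/* Store a pointer while preserving whatever tag bits are already set. */
static void tagged_set(struct tagged_ptr *tp, void *p)
{
    uintptr_t tags = tp->val & TAG_MASK;

    assert(((uintptr_t)p & TAG_MASK) == 0); /* needs >= 4-byte alignment */
    tp->val = (uintptr_t)p | tags;
}

static void *tagged_get(const struct tagged_ptr *tp)
{
    return (void *)(tp->val & ~TAG_MASK);
}

int main(void)
{
    static long table[4];                    /* stands in for a jump_entry table */
    struct tagged_ptr tp = { .val = TAG_TRUE };

    tagged_set(&tp, table);                  /* pointer changes, tag survives */
    tp.val |= TAG_LINKED;
    printf("ptr=%p true=%lu linked=%lu\n", tagged_get(&tp),
           (unsigned long)(tp.val & TAG_TRUE),
           (unsigned long)((tp.val & TAG_LINKED) >> 1));
    return 0;
}
```

This is why the patch can drop the separate key->next field on !CONFIG_MODULES builds: the union plus two tag bits carries all three pieces of state in one word.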
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eb230f06ba41..0c0609326391 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1110,13 +1110,6 @@ struct ftrace_func_entry {
     unsigned long ip;
 };
 
-struct ftrace_hash {
-    unsigned long size_bits;
-    struct hlist_head *buckets;
-    unsigned long count;
-    struct rcu_head rcu;
-};
-
 /*
  * We make these constant because no one should touch them,
  * but they are used as the default "empty hash", to avoid allocating
@@ -1192,26 +1185,24 @@ struct ftrace_page {
 static struct ftrace_page *ftrace_pages_start;
 static struct ftrace_page *ftrace_pages;
 
-static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
+static __always_inline unsigned long
+ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
 {
-    return !hash || !hash->count;
+    if (hash->size_bits > 0)
+        return hash_long(ip, hash->size_bits);
+
+    return 0;
 }
 
-static struct ftrace_func_entry *
-ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+/* Only use this function if ftrace_hash_empty() has already been tested */
+static __always_inline struct ftrace_func_entry *
+__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 {
     unsigned long key;
     struct ftrace_func_entry *entry;
     struct hlist_head *hhd;
 
-    if (ftrace_hash_empty(hash))
-        return NULL;
-
-    if (hash->size_bits > 0)
-        key = hash_long(ip, hash->size_bits);
-    else
-        key = 0;
-
+    key = ftrace_hash_key(hash, ip);
     hhd = &hash->buckets[key];
 
     hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
@@ -1221,17 +1212,32 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
     return NULL;
 }
 
+/**
+ * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
+ * @hash: The hash to look at
+ * @ip: The instruction pointer to test
+ *
+ * Search a given @hash to see if a given instruction pointer (@ip)
+ * exists in it.
+ *
+ * Returns the entry that holds the @ip if found. NULL otherwise.
+ */
+struct ftrace_func_entry *
+ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+{
+    if (ftrace_hash_empty(hash))
+        return NULL;
+
+    return __ftrace_lookup_ip(hash, ip);
+}
+
 static void __add_hash_entry(struct ftrace_hash *hash,
                              struct ftrace_func_entry *entry)
 {
     struct hlist_head *hhd;
     unsigned long key;
 
-    if (hash->size_bits)
-        key = hash_long(entry->ip, hash->size_bits);
-    else
-        key = 0;
-
+    key = ftrace_hash_key(hash, entry->ip);
     hhd = &hash->buckets[key];
     hlist_add_head(&entry->hlist, hhd);
     hash->count++;
@@ -1383,9 +1389,8 @@ ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
                                        struct ftrace_hash *new_hash);
 
-static int
-ftrace_hash_move(struct ftrace_ops *ops, int enable,
-                 struct ftrace_hash **dst, struct ftrace_hash *src)
+static struct ftrace_hash *
+__ftrace_hash_move(struct ftrace_hash *src)
 {
     struct ftrace_func_entry *entry;
     struct hlist_node *tn;
@@ -1393,21 +1398,13 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
     struct ftrace_hash *new_hash;
     int size = src->count;
     int bits = 0;
-    int ret;
     int i;
 
-    /* Reject setting notrace hash on IPMODIFY ftrace_ops */
-    if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
-        return -EINVAL;
-
     /*
-     * If the new source is empty, just free dst and assign it
-     * the empty_hash.
+     * If the new source is empty, just return the empty_hash.
      */
-    if (!src->count) {
-        new_hash = EMPTY_HASH;
-        goto update;
-    }
+    if (!src->count)
+        return EMPTY_HASH;
 
     /*
      * Make the hash size about 1/2 the # found
@@ -1421,7 +1418,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 
     new_hash = alloc_ftrace_hash(bits);
     if (!new_hash)
-        return -ENOMEM;
+        return NULL;
 
     size = 1 << src->size_bits;
     for (i = 0; i < size; i++) {
@@ -1432,7 +1429,24 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
         }
     }
 
-update:
+    return new_hash;
+}
+
+static int
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+                 struct ftrace_hash **dst, struct ftrace_hash *src)
+{
+    struct ftrace_hash *new_hash;
+    int ret;
+
+    /* Reject setting notrace hash on IPMODIFY ftrace_ops */
+    if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
+        return -EINVAL;
+
+    new_hash = __ftrace_hash_move(src);
+    if (!new_hash)
+        return -ENOMEM;
+
     /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
     if (enable) {
         /* IPMODIFY should be updated only when filter_hash updating */
@@ -1466,9 +1480,9 @@ static bool hash_contains_ip(unsigned long ip,
      * notrace hash is considered not in the notrace hash.
      */
     return (ftrace_hash_empty(hash->filter_hash) ||
-            ftrace_lookup_ip(hash->filter_hash, ip)) &&
+            __ftrace_lookup_ip(hash->filter_hash, ip)) &&
            (ftrace_hash_empty(hash->notrace_hash) ||
-            !ftrace_lookup_ip(hash->notrace_hash, ip));
+            !__ftrace_lookup_ip(hash->notrace_hash, ip));
 }
 
 /*
@@ -2880,7 +2894,7 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 
     /* The function must be in the filter */
     if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
-        !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
+        !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
         return 0;
 
     /* If in notrace hash, we ignore it too */
@@ -4382,7 +4396,7 @@ __setup("ftrace_filter=", set_ftrace_filter);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
+static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
 
 static unsigned long save_global_trampoline;
 static unsigned long save_global_flags;
@@ -4405,18 +4419,17 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
 {
     int ret;
     char *func;
-    unsigned long *table = ftrace_graph_funcs;
-    int *count = &ftrace_graph_count;
+    struct ftrace_hash *hash;
 
-    if (!enable) {
-        table = ftrace_graph_notrace_funcs;
-        count = &ftrace_graph_notrace_count;
-    }
+    if (enable)
+        hash = ftrace_graph_hash;
+    else
+        hash = ftrace_graph_notrace_hash;
 
     while (buf) {
         func = strsep(&buf, ",");
         /* we allow only one expression at a time */
-        ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
+        ret = ftrace_graph_set_hash(hash, func);
         if (ret)
             printk(KERN_DEBUG "ftrace: function %s not "
                    "traceable\n", func);
@@ -4540,26 +4553,55 @@ static const struct file_operations ftrace_notrace_fops = {
 
 static DEFINE_MUTEX(graph_lock);
 
-int ftrace_graph_count;
-int ftrace_graph_notrace_count;
-unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
-unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
+struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
+
+enum graph_filter_type {
+    GRAPH_FILTER_NOTRACE = 0,
+    GRAPH_FILTER_FUNCTION,
+};
+
+#define FTRACE_GRAPH_EMPTY ((void *)1)
 
 struct ftrace_graph_data {
-    unsigned long *table;
-    size_t size;
-    int *count;
+    struct ftrace_hash *hash;
+    struct ftrace_func_entry *entry;
+    int idx;   /* for hash table iteration */
+    enum graph_filter_type type;
+    struct ftrace_hash *new_hash;
     const struct seq_operations *seq_ops;
+    struct trace_parser parser;
 };
 
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
     struct ftrace_graph_data *fgd = m->private;
+    struct ftrace_func_entry *entry = fgd->entry;
+    struct hlist_head *head;
+    int i, idx = fgd->idx;
 
-    if (*pos >= *fgd->count)
+    if (*pos >= fgd->hash->count)
         return NULL;
-    return &fgd->table[*pos];
+
+    if (entry) {
+        hlist_for_each_entry_continue(entry, hlist) {
+            fgd->entry = entry;
+            return entry;
+        }
+
+        idx++;
+    }
+
+    for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
+        head = &fgd->hash->buckets[i];
+        hlist_for_each_entry(entry, head, hlist) {
+            fgd->entry = entry;
+            fgd->idx = i;
+            return entry;
+        }
+    }
+    return NULL;
 }
 
 static void *
@@ -4575,10 +4617,19 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 
     mutex_lock(&graph_lock);
 
+    if (fgd->type == GRAPH_FILTER_FUNCTION)
+        fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
+                        lockdep_is_held(&graph_lock));
+    else
+        fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+                        lockdep_is_held(&graph_lock));
+
     /* Nothing, tell g_show to print all functions are enabled */
-    if (!*fgd->count && !*pos)
-        return (void *)1;
+    if (ftrace_hash_empty(fgd->hash) && !*pos)
+        return FTRACE_GRAPH_EMPTY;
 
+    fgd->idx = 0;
+    fgd->entry = NULL;
     return __g_next(m, pos);
 }
 
@@ -4589,22 +4640,22 @@ static void g_stop(struct seq_file *m, void *p)
 
 static int g_show(struct seq_file *m, void *v)
 {
-    unsigned long *ptr = v;
+    struct ftrace_func_entry *entry = v;
 
-    if (!ptr)
+    if (!entry)
         return 0;
 
-    if (ptr == (unsigned long *)1) {
+    if (entry == FTRACE_GRAPH_EMPTY) {
         struct ftrace_graph_data *fgd = m->private;
 
-        if (fgd->table == ftrace_graph_funcs)
+        if (fgd->type == GRAPH_FILTER_FUNCTION)
             seq_puts(m, "#### all functions enabled ####\n");
         else
             seq_puts(m, "#### no functions disabled ####\n");
         return 0;
     }
 
-    seq_printf(m, "%ps\n", (void *)*ptr);
+    seq_printf(m, "%ps\n", (void *)entry->ip);
 
     return 0;
 }
@@ -4621,24 +4672,51 @@ __ftrace_graph_open(struct inode *inode, struct file *file,
                     struct ftrace_graph_data *fgd)
 {
     int ret = 0;
+    struct ftrace_hash *new_hash = NULL;
+
+    if (file->f_mode & FMODE_WRITE) {
+        const int size_bits = FTRACE_HASH_DEFAULT_BITS;
+
+        if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
+            return -ENOMEM;
 
-    mutex_lock(&graph_lock);
-    if ((file->f_mode & FMODE_WRITE) &&
-        (file->f_flags & O_TRUNC)) {
-        *fgd->count = 0;
-        memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
+        if (file->f_flags & O_TRUNC)
+            new_hash = alloc_ftrace_hash(size_bits);
+        else
+            new_hash = alloc_and_copy_ftrace_hash(size_bits,
+                                                  fgd->hash);
+        if (!new_hash) {
+            ret = -ENOMEM;
+            goto out;
+        }
     }
-    mutex_unlock(&graph_lock);
 
     if (file->f_mode & FMODE_READ) {
-        ret = seq_open(file, fgd->seq_ops);
+        ret = seq_open(file, &ftrace_graph_seq_ops);
         if (!ret) {
             struct seq_file *m = file->private_data;
             m->private = fgd;
+        } else {
+            /* Failed */
+            free_ftrace_hash(new_hash);
+            new_hash = NULL;
         }
     } else
         file->private_data = fgd;
 
+ out:
+    if (ret < 0 && file->f_mode & FMODE_WRITE)
+        trace_parser_put(&fgd->parser);
+
+    fgd->new_hash = new_hash;
+
+    /*
+     * All uses of fgd->hash must be taken with the graph_lock
+     * held. The graph_lock is going to be released, so force
+     * fgd->hash to be reinitialized when it is taken again.
+     */
+    fgd->hash = NULL;
+
     return ret;
 }
 
@@ -4646,6 +4724,7 @@ static int
 ftrace_graph_open(struct inode *inode, struct file *file)
 {
     struct ftrace_graph_data *fgd;
+    int ret;
 
     if (unlikely(ftrace_disabled))
         return -ENODEV;
@@ -4654,18 +4733,26 @@ ftrace_graph_open(struct inode *inode, struct file *file)
     if (fgd == NULL)
         return -ENOMEM;
 
-    fgd->table = ftrace_graph_funcs;
-    fgd->size = FTRACE_GRAPH_MAX_FUNCS;
-    fgd->count = &ftrace_graph_count;
+    mutex_lock(&graph_lock);
+
+    fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
+                    lockdep_is_held(&graph_lock));
+    fgd->type = GRAPH_FILTER_FUNCTION;
     fgd->seq_ops = &ftrace_graph_seq_ops;
 
-    return __ftrace_graph_open(inode, file, fgd);
+    ret = __ftrace_graph_open(inode, file, fgd);
+    if (ret < 0)
+        kfree(fgd);
+
+    mutex_unlock(&graph_lock);
+    return ret;
 }
 
 static int
 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
 {
     struct ftrace_graph_data *fgd;
+    int ret;
 
     if (unlikely(ftrace_disabled))
         return -ENODEV;
@@ -4674,45 +4761,97 @@ ftrace_graph_notrace_open(struct inode *inode, struct file *file)
     if (fgd == NULL)
         return -ENOMEM;
 
-    fgd->table = ftrace_graph_notrace_funcs;
-    fgd->size = FTRACE_GRAPH_MAX_FUNCS;
-    fgd->count = &ftrace_graph_notrace_count;
+    mutex_lock(&graph_lock);
+
+    fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+                    lockdep_is_held(&graph_lock));
+    fgd->type = GRAPH_FILTER_NOTRACE;
     fgd->seq_ops = &ftrace_graph_seq_ops;
 
-    return __ftrace_graph_open(inode, file, fgd);
+    ret = __ftrace_graph_open(inode, file, fgd);
+    if (ret < 0)
+        kfree(fgd);
+
+    mutex_unlock(&graph_lock);
+    return ret;
 }
 
 static int
 ftrace_graph_release(struct inode *inode, struct file *file)
 {
+    struct ftrace_graph_data *fgd;
+    struct ftrace_hash *old_hash, *new_hash;
+    struct trace_parser *parser;
+    int ret = 0;
+
     if (file->f_mode & FMODE_READ) {
         struct seq_file *m = file->private_data;
 
-        kfree(m->private);
+        fgd = m->private;
         seq_release(inode, file);
     } else {
-        kfree(file->private_data);
+        fgd = file->private_data;
     }
 
-    return 0;
+
+    if (file->f_mode & FMODE_WRITE) {
+
+        parser = &fgd->parser;
+
+        if (trace_parser_loaded((parser))) {
+            parser->buffer[parser->idx] = 0;
+            ret = ftrace_graph_set_hash(fgd->new_hash,
+                                        parser->buffer);
+        }
+
+        trace_parser_put(parser);
+
+        new_hash = __ftrace_hash_move(fgd->new_hash);
+        if (!new_hash) {
+            ret = -ENOMEM;
+            goto out;
+        }
+
+        mutex_lock(&graph_lock);
+
+        if (fgd->type == GRAPH_FILTER_FUNCTION) {
+            old_hash = rcu_dereference_protected(ftrace_graph_hash,
+                        lockdep_is_held(&graph_lock));
+            rcu_assign_pointer(ftrace_graph_hash, new_hash);
+        } else {
+            old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+                        lockdep_is_held(&graph_lock));
+            rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
+        }
+
+        mutex_unlock(&graph_lock);
+
+        /* Wait till all users are no longer using the old hash */
+        synchronize_sched();
+
+        free_ftrace_hash(old_hash);
+    }
+
+ out:
+    kfree(fgd->new_hash);
+    kfree(fgd);
+
+    return ret;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
+ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
 {
     struct ftrace_glob func_g;
     struct dyn_ftrace *rec;
     struct ftrace_page *pg;
+    struct ftrace_func_entry *entry;
     int fail = 1;
     int not;
-    bool exists;
-    int i;
 
     /* decode regex */
     func_g.type = filter_parse_regex(buffer, strlen(buffer),
                                      &func_g.search, &not);
-    if (!not && *idx >= size)
-        return -EBUSY;
 
     func_g.len = strlen(func_g.search);
 
@@ -4729,26 +4868,18 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
             continue;
 
         if (ftrace_match_record(rec, &func_g, NULL, 0)) {
-            /* if it is in the array */
-            exists = false;
-            for (i = 0; i < *idx; i++) {
-                if (array[i] == rec->ip) {
-                    exists = true;
-                    break;
-                }
-            }
+            entry = ftrace_lookup_ip(hash, rec->ip);
 
             if (!not) {
                 fail = 0;
-                if (!exists) {
-                    array[(*idx)++] = rec->ip;
-                    if (*idx >= size)
-                        goto out;
-                }
+
+                if (entry)
+                    continue;
+                if (add_hash_entry(hash, rec->ip) < 0)
+                    goto out;
             } else {
-                if (exists) {
-                    array[i] = array[--(*idx)];
-                    array[*idx] = 0;
+                if (entry) {
+                    free_hash_entry(hash, entry);
                     fail = 0;
                 }
             }
@@ -4767,35 +4898,34 @@ static ssize_t
 ftrace_graph_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
 {
-    struct trace_parser parser;
     ssize_t read, ret = 0;
     struct ftrace_graph_data *fgd = file->private_data;
+    struct trace_parser *parser;
 
     if (!cnt)
         return 0;
 
-    if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
-        return -ENOMEM;
-
-    read = trace_get_user(&parser, ubuf, cnt, ppos);
+    /* Read mode uses seq functions */
+    if (file->f_mode & FMODE_READ) {
+        struct seq_file *m = file->private_data;
+        fgd = m->private;
+    }
 
-    if (read >= 0 && trace_parser_loaded((&parser))) {
-        parser.buffer[parser.idx] = 0;
+    parser = &fgd->parser;
 
-        mutex_lock(&graph_lock);
+    read = trace_get_user(parser, ubuf, cnt, ppos);
 
-        /* we allow only one expression at a time */
-        ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
-                              parser.buffer);
+    if (read >= 0 && trace_parser_loaded(parser) &&
+        !trace_parser_cont(parser)) {
 
-        mutex_unlock(&graph_lock);
+        ret = ftrace_graph_set_hash(fgd->new_hash,
+                                    parser->buffer);
+        trace_parser_clear(parser);
     }
 
     if (!ret)
         ret = read;
 
-    trace_parser_put(&parser);
-
     return ret;
 }
 
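The ftrace refactor above replaces two fixed-size arrays with hash tables and splits the lookup into a NULL-safe wrapper plus an unchecked fast path for callers that have already tested emptiness (the "slight optimization" commit). A simplified userspace sketch of that split follows; it is not the kernel implementation, and the chaining scheme, hash constant, and all names are illustrative:

```c
#include <stdio.h>

struct entry { unsigned long ip; struct entry *next; };

struct hash {
    unsigned long size_bits;   /* 0 means a single bucket */
    struct entry **buckets;
    unsigned long count;
};

/* Bucket index, factored out so add and lookup share it. */
static inline unsigned long hash_key(const struct hash *h, unsigned long ip)
{
    if (h->size_bits > 0)  /* 64-bit golden-ratio multiplicative hash */
        return (ip * 0x61C8864680B583EBULL) >> (64 - h->size_bits);
    return 0;
}

static inline int hash_empty(const struct hash *h)
{
    return !h || !h->count;
}

/* Fast path: only valid once hash_empty() has already been tested. */
static struct entry *__lookup_ip(const struct hash *h, unsigned long ip)
{
    struct entry *e;

    for (e = h->buckets[hash_key(h, ip)]; e; e = e->next)
        if (e->ip == ip)
            return e;
    return NULL;
}

/* Safe path for callers that may hold a NULL or empty hash. */
static struct entry *lookup_ip(const struct hash *h, unsigned long ip)
{
    if (hash_empty(h))
        return NULL;
    return __lookup_ip(h, ip);
}

int main(void)
{
    struct entry e = { .ip = 0xdeadbeef };
    struct entry *buckets[1] = { &e };
    struct hash h = { .size_bits = 0, .buckets = buckets, .count = 1 };

    printf("%s\n", lookup_ip(&h, 0xdeadbeef) ? "hit" : "miss");   /* hit */
    printf("%s\n", lookup_ip(NULL, 0xdeadbeef) ? "hit" : "miss"); /* miss */
    return 0;
}
```

Hot paths such as hash_contains_ip() call the unchecked variant because they test emptiness once up front, saving a branch per probe.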
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 310f0ea0d1a2..707445ceb7ef 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -260,16 +260,8 @@ unsigned long long ns2usecs(u64 nsec)
     TRACE_ITER_EVENT_FORK
 
 /*
- * The global_trace is the descriptor that holds the tracing
- * buffers for the live tracing. For each CPU, it contains
- * a link list of pages that will store trace entries. The
- * page descriptor of the pages in the memory is used to hold
- * the link list by linking the lru item in the page descriptor
- * to each of the pages in the buffer per CPU.
- *
- * For each active CPU there is a data field that holds the
- * pages for the buffer for that CPU. Each CPU has the same number
- * of pages allocated for its buffer.
+ * The global_trace is the descriptor that holds the top-level tracing
+ * buffers for the live tracing.
  */
 static struct trace_array global_trace = {
     .trace_flags = TRACE_DEFAULT_FLAGS,
@@ -1193,6 +1185,7 @@ int trace_parser_get_init(struct trace_parser *parser, int size)
 void trace_parser_put(struct trace_parser *parser)
 {
     kfree(parser->buffer);
+    parser->buffer = NULL;
 }
 
 /*
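The one-line trace_parser_put() change is the classic free-and-NULL idiom: clearing the pointer turns a second put into a harmless no-op instead of a double free, which is what lets the graph-filter code above call put from more than one path. A minimal sketch of the idiom (userspace stand-ins, not the kernel types):

```c
#include <stdlib.h>

struct parser { char *buffer; };

static void parser_put(struct parser *p)
{
    free(p->buffer);   /* free(NULL) is defined to do nothing */
    p->buffer = NULL;  /* so a repeated parser_put() is now safe */
}

int main(void)
{
    struct parser p = { .buffer = malloc(64) };

    parser_put(&p);
    parser_put(&p);    /* no double free thanks to the NULL store */
    return 0;
}
```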
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1ea51ab53edf..ae1cce91fead 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -753,6 +753,21 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
 
 extern char trace_find_mark(unsigned long long duration);
 
+struct ftrace_hash {
+    unsigned long size_bits;
+    struct hlist_head *buckets;
+    unsigned long count;
+    struct rcu_head rcu;
+};
+
+struct ftrace_func_entry *
+ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
+
+static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
+{
+    return !hash || !hash->count;
+}
+
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
@@ -787,53 +802,50 @@ extern void __trace_graph_return(struct trace_array *tr,
                 struct ftrace_graph_ret *trace,
                 unsigned long flags, int pc);
 
-
 #ifdef CONFIG_DYNAMIC_FTRACE
-/* TODO: make this variable */
-#define FTRACE_GRAPH_MAX_FUNCS 32
-extern int ftrace_graph_count;
-extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
-extern int ftrace_graph_notrace_count;
-extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
+extern struct ftrace_hash *ftrace_graph_hash;
+extern struct ftrace_hash *ftrace_graph_notrace_hash;
 
 static inline int ftrace_graph_addr(unsigned long addr)
 {
-    int i;
-
-    if (!ftrace_graph_count)
-        return 1;
-
-    for (i = 0; i < ftrace_graph_count; i++) {
-        if (addr == ftrace_graph_funcs[i]) {
-            /*
-             * If no irqs are to be traced, but a set_graph_function
-             * is set, and called by an interrupt handler, we still
-             * want to trace it.
-             */
-            if (in_irq())
-                trace_recursion_set(TRACE_IRQ_BIT);
-            else
-                trace_recursion_clear(TRACE_IRQ_BIT);
-            return 1;
-        }
+    int ret = 0;
+
+    preempt_disable_notrace();
+
+    if (ftrace_hash_empty(ftrace_graph_hash)) {
+        ret = 1;
+        goto out;
     }
 
-    return 0;
+    if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+        /*
+         * If no irqs are to be traced, but a set_graph_function
+         * is set, and called by an interrupt handler, we still
+         * want to trace it.
+         */
+        if (in_irq())
+            trace_recursion_set(TRACE_IRQ_BIT);
+        else
+            trace_recursion_clear(TRACE_IRQ_BIT);
+        ret = 1;
+    }
+
+out:
+    preempt_enable_notrace();
+    return ret;
 }
 
 static inline int ftrace_graph_notrace_addr(unsigned long addr)
 {
-    int i;
+    int ret = 0;
 
-    if (!ftrace_graph_notrace_count)
-        return 0;
+    preempt_disable_notrace();
 
-    for (i = 0; i < ftrace_graph_notrace_count; i++) {
-        if (addr == ftrace_graph_notrace_funcs[i])
-            return 1;
-    }
+    if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
+        ret = 1;
 
-    return 0;
+    preempt_enable_notrace();
+    return ret;
 }
 #else
 static inline int ftrace_graph_addr(unsigned long addr)
@@ -1300,7 +1312,8 @@ static inline bool is_string_field(struct ftrace_event_field *field)
 {
     return field->filter_type == FILTER_DYN_STRING ||
            field->filter_type == FILTER_STATIC_STRING ||
-           field->filter_type == FILTER_PTR_STRING;
+           field->filter_type == FILTER_PTR_STRING ||
+           field->filter_type == FILTER_COMM;
 }
 
 static inline bool is_function_field(struct ftrace_event_field *field)
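The concurrency design in this pair of files is publish-then-drain: ftrace_graph_addr() brackets its lookup with preempt_disable_notrace()/preempt_enable_notrace(), while the updater in ftrace_graph_release() publishes a replacement with rcu_assign_pointer() and then calls synchronize_sched(), which cannot return while any CPU sits in a preempt-disabled section, so freeing the old hash afterwards is safe. The following is a crude userspace analogue only, substituting an explicit reader count for preemption state and a busy-wait for the grace period; it is not a faithful RCU implementation and every name in it is invented:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(int *) table;   /* stands in for ftrace_graph_hash */
static atomic_int readers;     /* stands in for CPUs with preemption off */

static int read_entry(void)
{
    int v;

    atomic_fetch_add(&readers, 1);  /* ~ preempt_disable_notrace() */
    v = *atomic_load(&table);       /* lookup while protected */
    atomic_fetch_sub(&readers, 1);  /* ~ preempt_enable_notrace() */
    return v;
}

static void replace_table(int *new_table)
{
    /* ~ rcu_assign_pointer(): new readers see only the new table */
    int *old = atomic_exchange(&table, new_table);

    while (atomic_load(&readers))   /* ~ synchronize_sched(): wait for */
        ;                           /*   pre-publication readers to drain */
    free(old);                      /* now nothing can still reference it */
}

int main(void)
{
    int *t1 = malloc(sizeof(*t1)), *t2 = malloc(sizeof(*t2));

    *t1 = 1; *t2 = 2;
    atomic_store(&table, t1);
    printf("before: %d\n", read_entry());
    replace_table(t2);
    printf("after: %d\n", read_entry());
    free(t2);
    return 0;
}
```

The kernel variant needs no reader counter at all: disabling preemption is what synchronize_sched() waits out, which is why the read side stays cheap enough for the function-graph hot path.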
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index e3b488825ae3..e49fbe901cfc 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -175,9 +175,9 @@ int trace_benchmark_reg(void)
 
     bm_event_thread = kthread_run(benchmark_event_kthread,
                                   NULL, "event_benchmark");
-    if (!bm_event_thread) {
+    if (IS_ERR(bm_event_thread)) {
         pr_warning("trace benchmark failed to create kernel thread\n");
-        return -ENOMEM;
+        return PTR_ERR(bm_event_thread);
     }
 
     return 0;
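This fix matters because kthread_run() never returns NULL on failure: it returns an error code encoded in the pointer value itself, so the old !bm_event_thread test could never fire. The kernel's ERR_PTR scheme packs small negative errnos into the top page of the address space. A standalone sketch of the encoding, simplified from the helpers in include/linux/err.h (the errno constant here is a local stand-in):

```c
#include <stdio.h>

#define MAX_ERRNO 4095
#define MY_ENOMEM 12   /* local stand-in for the <errno.h> value */

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    /* errors occupy the last MAX_ERRNO values of the address space */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_thread(int fail)
{
    static int dummy;

    return fail ? ERR_PTR(-MY_ENOMEM) : (void *)&dummy;
}

int main(void)
{
    void *t = create_thread(1);

    if (IS_ERR(t))  /* a NULL check here would silently miss the error */
        printf("failed: errno %ld\n", -PTR_ERR(t));
    return 0;
}
```

Propagating PTR_ERR() instead of a hard-coded -ENOMEM also preserves whatever error kthread_run() actually hit.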
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 75489de546b6..4d8fdf3184dc 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(branch_tracing_mutex);
 static struct trace_array *branch_tracer;
 
 static void
-probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
     struct trace_event_call *call = &event_branch;
     struct trace_array *tr = branch_tracer;
@@ -68,16 +68,17 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
     entry = ring_buffer_event_data(event);
 
     /* Strip off the path, only save the file */
-    p = f->file + strlen(f->file);
-    while (p >= f->file && *p != '/')
+    p = f->data.file + strlen(f->data.file);
+    while (p >= f->data.file && *p != '/')
         p--;
     p++;
 
-    strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
+    strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);
     strncpy(entry->file, p, TRACE_FILE_SIZE);
     entry->func[TRACE_FUNC_SIZE] = 0;
     entry->file[TRACE_FILE_SIZE] = 0;
-    entry->line = f->line;
+    entry->constant = f->constant;
+    entry->line = f->data.line;
     entry->correct = val == expect;
 
     if (!call_filter_check_discard(call, entry, buffer, event))
@@ -89,7 +90,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 }
 
 static inline
-void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
     if (!branch_tracing_enabled)
         return;
@@ -195,13 +196,19 @@ core_initcall(init_branch_tracer);
 
 #else
 static inline
-void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
-void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
+void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+                          int expect, int is_constant)
 {
+    /* A constant is always correct */
+    if (is_constant) {
+        f->constant++;
+        val = expect;
+    }
     /*
      * I would love to have a trace point here instead, but the
      * trace point code is so inundated with unlikely and likely
@@ -212,9 +219,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
 
     /* FIXME: Make this atomic! */
     if (val == expect)
-        f->correct++;
+        f->data.correct++;
     else
-        f->incorrect++;
+        f->data.incorrect++;
 }
 EXPORT_SYMBOL(ftrace_likely_update);
 
@@ -245,29 +252,60 @@ static inline long get_incorrect_percent(struct ftrace_branch_data *p)
     return percent;
 }
 
-static int branch_stat_show(struct seq_file *m, void *v)
+static const char *branch_stat_process_file(struct ftrace_branch_data *p)
 {
-    struct ftrace_branch_data *p = v;
     const char *f;
-    long percent;
 
     /* Only print the file, not the path */
     f = p->file + strlen(p->file);
     while (f >= p->file && *f != '/')
         f--;
-    f++;
+    return ++f;
+}
+
+static void branch_stat_show(struct seq_file *m,
+                             struct ftrace_branch_data *p, const char *f)
+{
+    long percent;
 
     /*
     * The miss is overlayed on correct, and hit on incorrect.
     */
     percent = get_incorrect_percent(p);
 
-    seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
     if (percent < 0)
         seq_puts(m, " X ");
     else
         seq_printf(m, "%3ld ", percent);
+
     seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
+}
+
+static int branch_stat_show_normal(struct seq_file *m,
+                                   struct ftrace_branch_data *p, const char *f)
+{
+    seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
+    branch_stat_show(m, p, f);
+    return 0;
+}
+
+static int annotate_branch_stat_show(struct seq_file *m, void *v)
+{
+    struct ftrace_likely_data *p = v;
+    const char *f;
+    int l;
+
+    f = branch_stat_process_file(&p->data);
+
+    if (!p->constant)
+        return branch_stat_show_normal(m, &p->data, f);
+
+    l = snprintf(NULL, 0, "/%lu", p->constant);
+    l = l > 8 ? 0 : 8 - l;
+
+    seq_printf(m, "%8lu/%lu %*lu ",
+               p->data.correct, p->constant, l, p->data.incorrect);
+    branch_stat_show(m, &p->data, f);
     return 0;
 }
 
@@ -279,7 +317,7 @@ static void *annotated_branch_stat_start(struct tracer_stat *trace)
 static void *
 annotated_branch_stat_next(void *v, int idx)
 {
-    struct ftrace_branch_data *p = v;
+    struct ftrace_likely_data *p = v;
 
     ++p;
 
@@ -328,7 +366,7 @@ static struct tracer_stat annotated_branch_stats = {
     .stat_next = annotated_branch_stat_next,
     .stat_cmp = annotated_branch_stat_cmp,
     .stat_headers = annotated_branch_stat_headers,
-    .stat_show = branch_stat_show
+    .stat_show = annotate_branch_stat_show
 };
 
 __init static int init_annotated_branch_stats(void)
@@ -379,12 +417,21 @@ all_branch_stat_next(void *v, int idx) | |||
379 | return p; | 417 | return p; |
380 | } | 418 | } |
381 | 419 | ||
420 | static int all_branch_stat_show(struct seq_file *m, void *v) | ||
421 | { | ||
422 | struct ftrace_branch_data *p = v; | ||
423 | const char *f; | ||
424 | |||
425 | f = branch_stat_process_file(p); | ||
426 | return branch_stat_show_normal(m, p, f); | ||
427 | } | ||
428 | |||
382 | static struct tracer_stat all_branch_stats = { | 429 | static struct tracer_stat all_branch_stats = { |
383 | .name = "branch_all", | 430 | .name = "branch_all", |
384 | .stat_start = all_branch_stat_start, | 431 | .stat_start = all_branch_stat_start, |
385 | .stat_next = all_branch_stat_next, | 432 | .stat_next = all_branch_stat_next, |
386 | .stat_headers = all_branch_stat_headers, | 433 | .stat_headers = all_branch_stat_headers, |
387 | .stat_show = branch_stat_show | 434 | .stat_show = all_branch_stat_show |
388 | }; | 435 | }; |
389 | 436 | ||
390 | __init static int all_annotated_branch_stats(void) | 437 | __init static int all_annotated_branch_stats(void) |
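With both tables now pointing at type-specific show routines, the shared stat core never needs to know whether an entry is an ftrace_branch_data or an ftrace_likely_data. Conceptually the framework drives the callbacks as below; this is a sketch of the contract only, and the real seq_file-based iteration lives in kernel/trace/trace_stat.c:

	/* Sketch: how a registered tracer_stat's callbacks fit together. */
	static void walk_stats(struct tracer_stat *ts, struct seq_file *m)
	{
		void *v;
		int idx = 0;

		if (ts->stat_headers)
			ts->stat_headers(m);

		for (v = ts->stat_start(ts); v; v = ts->stat_next(v, ++idx))
			ts->stat_show(m, v);	/* e.g. all_branch_stat_show() */
	}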
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index eb7396b7e7c3..c203ac4df791 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h | |||
@@ -328,11 +328,13 @@ FTRACE_ENTRY(branch, trace_branch, | |||
328 | __array( char, func, TRACE_FUNC_SIZE+1 ) | 328 | __array( char, func, TRACE_FUNC_SIZE+1 ) |
329 | __array( char, file, TRACE_FILE_SIZE+1 ) | 329 | __array( char, file, TRACE_FILE_SIZE+1 ) |
330 | __field( char, correct ) | 330 | __field( char, correct ) |
331 | __field( char, constant ) | ||
331 | ), | 332 | ), |
332 | 333 | ||
333 | F_printk("%u:%s:%s (%u)", | 334 | F_printk("%u:%s:%s (%u)%s", |
334 | __entry->line, | 335 | __entry->line, |
335 | __entry->func, __entry->file, __entry->correct), | 336 | __entry->func, __entry->file, __entry->correct, |
337 | __entry->constant ? " CONSTANT" : ""), | ||
336 | 338 | ||
337 | FILTER_OTHER | 339 | FILTER_OTHER |
338 | ); | 340 | ); |
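With the new field, the F_printk() format "%u:%s:%s (%u)%s" renders a constant-folded branch event along these lines (function and file names are illustrative):

	42:do_sys_open:open.c (1) CONSTANT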
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index af344a1bf0d0..1199fe1d8eba 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c | |||
@@ -322,10 +322,7 @@ static void move_to_next_cpu(bool initmask) | |||
322 | * need to ensure nothing else might be running (and thus preempting). | 322 | * need to ensure nothing else might be running (and thus preempting). |
323 | * Obviously this should never be used in production environments. | 323 | * Obviously this should never be used in production environments. |
324 | * | 324 | * |
325 | * Currently this runs on which ever CPU it was scheduled on, but most | 325 | * Executes one loop iteration on each CPU in the tracing_cpumask sysfs file. |
326 | * real-world hardware latency situations occur across several CPUs, | ||
327 | * but we might later generalize this if we find there are any actualy | ||
328 | * systems with alternate SMI delivery or other hardware latencies. | ||
329 | */ | 326 | */ |
330 | static int kthread_fn(void *data) | 327 | static int kthread_fn(void *data) |
331 | { | 328 | { |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 7ad9e53ad174..eadd96ef772f 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -16,6 +16,7 @@ | |||
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | #define pr_fmt(fmt) "trace_kprobe: " fmt | ||
19 | 20 | ||
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
21 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
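The pr_fmt() define has to appear before the includes that pull in <linux/printk.h>, because the pr_*() macros paste pr_fmt(fmt) into their format string at compile time. A minimal sketch of the mechanism (the message text is illustrative):

	#define pr_fmt(fmt) "trace_kprobe: " fmt

	#include <linux/printk.h>

	static void report(void)
	{
		/* Expands to printk(KERN_WARNING "trace_kprobe: " "bad probe\n") */
		pr_warn("bad probe\n");
	}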
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 8c0553d9afd3..52478f033f88 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c | |||
@@ -21,6 +21,7 @@ | |||
21 | * Copyright (C) IBM Corporation, 2010-2011 | 21 | * Copyright (C) IBM Corporation, 2010-2011 |
22 | * Author: Srikar Dronamraju | 22 | * Author: Srikar Dronamraju |
23 | */ | 23 | */ |
24 | #define pr_fmt(fmt) "trace_probe: " fmt | ||
24 | 25 | ||
25 | #include "trace_probe.h" | 26 | #include "trace_probe.h" |
26 | 27 | ||
@@ -647,7 +648,7 @@ ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer, | |||
647 | size_t count, loff_t *ppos, | 648 | size_t count, loff_t *ppos, |
648 | int (*createfn)(int, char **)) | 649 | int (*createfn)(int, char **)) |
649 | { | 650 | { |
650 | char *kbuf, *tmp; | 651 | char *kbuf, *buf, *tmp; |
651 | int ret = 0; | 652 | int ret = 0; |
652 | size_t done = 0; | 653 | size_t done = 0; |
653 | size_t size; | 654 | size_t size; |
@@ -667,27 +668,38 @@ ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer, | |||
667 | goto out; | 668 | goto out; |
668 | } | 669 | } |
669 | kbuf[size] = '\0'; | 670 | kbuf[size] = '\0'; |
670 | tmp = strchr(kbuf, '\n'); | 671 | buf = kbuf; |
672 | do { | ||
673 | tmp = strchr(buf, '\n'); | ||
674 | if (tmp) { | ||
675 | *tmp = '\0'; | ||
676 | size = tmp - buf + 1; | ||
677 | } else { | ||
678 | size = strlen(buf); | ||
679 | if (done + size < count) { | ||
680 | if (buf != kbuf) | ||
681 | break; | ||
682 | /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ | ||
683 | pr_warn("Line length is too long: Should be less than %d\n", | ||
684 | WRITE_BUFSIZE - 2); | ||
685 | ret = -EINVAL; | ||
686 | goto out; | ||
687 | } | ||
688 | } | ||
689 | done += size; | ||
671 | 690 | ||
672 | if (tmp) { | 691 | /* Remove comments */ |
673 | *tmp = '\0'; | 692 | tmp = strchr(buf, '#'); |
674 | size = tmp - kbuf + 1; | ||
675 | } else if (done + size < count) { | ||
676 | pr_warn("Line length is too long: Should be less than %d\n", | ||
677 | WRITE_BUFSIZE); | ||
678 | ret = -EINVAL; | ||
679 | goto out; | ||
680 | } | ||
681 | done += size; | ||
682 | /* Remove comments */ | ||
683 | tmp = strchr(kbuf, '#'); | ||
684 | 693 | ||
685 | if (tmp) | 694 | if (tmp) |
686 | *tmp = '\0'; | 695 | *tmp = '\0'; |
687 | 696 | ||
688 | ret = traceprobe_command(kbuf, createfn); | 697 | ret = traceprobe_command(buf, createfn); |
689 | if (ret) | 698 | if (ret) |
690 | goto out; | 699 | goto out; |
700 | buf += size; | ||
701 | |||
702 | } while (done < count); | ||
691 | } | 703 | } |
692 | ret = done; | 704 | ret = done; |
693 | 705 | ||
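The rewritten loop lets one write() carry several newline-separated probe definitions instead of exactly one. A hedged user-space sketch of the same splitting logic (parse_commands() and the printf() stand-in for createfn are illustrative):

	#include <stdio.h>
	#include <string.h>

	static void parse_commands(char *kbuf)
	{
		char *buf = kbuf, *tmp;
		size_t size;

		while (*buf) {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);	/* trailing line without '\n' */
			}

			/* Remove comments, as in the hunk above. */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			if (*buf)
				printf("command: %s\n", buf);

			buf += size;
		}
	}

	int main(void)
	{
		char input[] = "p:myopen do_sys_open # comment\n-:myopen\n";

		parse_commands(input);
		return 0;
	}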
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 0913693caf6e..f4379e772171 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -17,6 +17,7 @@ | |||
17 | * Copyright (C) IBM Corporation, 2010-2012 | 17 | * Copyright (C) IBM Corporation, 2010-2012 |
18 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> | 18 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> |
19 | */ | 19 | */ |
20 | #define pr_fmt(fmt) "trace_uprobe: " fmt ||
20 | 21 | ||
21 | #include <linux/module.h> | 22 | #include <linux/module.h> |
22 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
@@ -431,7 +432,8 @@ static int create_trace_uprobe(int argc, char **argv) | |||
431 | pr_info("Probe point is not specified.\n"); | 432 | pr_info("Probe point is not specified.\n"); |
432 | return -EINVAL; | 433 | return -EINVAL; |
433 | } | 434 | } |
434 | arg = strchr(argv[1], ':'); | 435 | /* Find the last occurrence, in case the path contains ':' too. */ |
436 | arg = strrchr(argv[1], ':'); | ||
435 | if (!arg) { | 437 | if (!arg) { |
436 | ret = -EINVAL; | 438 | ret = -EINVAL; |
437 | goto fail_address_parse; | 439 | goto fail_address_parse; |
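strrchr() matters here because the binary path itself may contain ':' characters; only the last one separates the path from the probe offset. A small stand-alone illustration (the path is made up):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char spec[] = "/opt/app:v2/bin/tool:0x4710";
		char *arg = strrchr(spec, ':');	/* last ':' only */

		if (!arg)
			return 1;
		*arg++ = '\0';
		printf("path=%s offset=%s\n", spec, arg);
		/* path=/opt/app:v2/bin/tool offset=0x4710 */
		return 0;
	}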