author | Steven Rostedt <rostedt@goodmis.org> | 2008-10-23 09:33:07 -0400
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-23 10:00:24 -0400
commit | 08f5ac906d2c0faf96d608c54a0b03177376da8d |
tree | ee43d94b168b9c3b757db83aea78b45eb3a556c1 /kernel/trace |
parent | 4d296c24326783bff1282ac72f310d8bac8df413 |
ftrace: remove ftrace hash
The ftrace hash was used by the ftrace_daemon code: the record-ip function (ftrace_record_ip)
would place the calling address (ip) into the hash, and the daemon would later read the hash
and modify the code at those addresses.
The hash only complicates the code. This patch removes it; newly recorded addresses are kept on a simple list (ftrace_new_addrs) instead.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
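Editor's note: the shape of the change is simple. Instead of hashing every recorded ip into ftrace_hash buckets for the daemon code to pick up later, ftrace_record_ip() now appends the new record to the ftrace_new_addrs list, and ftrace_update_code() drains that list and converts each call site. The sketch below is a minimal userspace approximation of that flow, not the kernel code itself: it assumes a simplified dyn_ftrace with a plain singly linked pointer in place of the kernel's list_head, uses malloc() instead of ftrace_alloc_dyn_node(), and prints instead of patching the mcount call, with no locking or kstop_machine.

```c
/*
 * Userspace sketch of the list-based recording this patch introduces.
 * Simplified: a plain singly linked pointer stands in for the kernel's
 * list_head, malloc() for ftrace_alloc_dyn_node(), and printf() for the
 * NOP patching that ftrace_code_disable() performs in the kernel.
 */
#include <stdio.h>
#include <stdlib.h>

struct dyn_ftrace {
	unsigned long ip;		/* address of the mcount call site */
	struct dyn_ftrace *next;	/* simplified stand-in for the 'list' member */
};

static struct dyn_ftrace *ftrace_new_addrs;	/* replaces the ftrace_hash buckets */
static unsigned long ftrace_update_cnt;

/* Record a call site: allocate a record and queue it for conversion. */
static struct dyn_ftrace *ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec = malloc(sizeof(*rec));

	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->next = ftrace_new_addrs;
	ftrace_new_addrs = rec;
	return rec;
}

/* Drain the queue; the kernel patches each mcount call to a NOP here. */
static int ftrace_update_code(void)
{
	struct dyn_ftrace *rec;

	while ((rec = ftrace_new_addrs)) {
		ftrace_new_addrs = rec->next;
		printf("convert call site at 0x%lx\n", rec->ip);
		ftrace_update_cnt++;
		free(rec);
	}

	return 0;
}

int main(void)
{
	/* hypothetical call-site addresses, for illustration only */
	ftrace_record_ip(0xc0100000UL);
	ftrace_record_ip(0xc0100040UL);
	ftrace_update_code();
	printf("%lu records updated\n", ftrace_update_cnt);
	return 0;
}
```

Since nothing needs to look a record up by ip anymore, a simple pending list is enough, which is also why the skip_trace() check can be dropped from trace.c below.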
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/ftrace.c | 243 |
-rw-r--r-- | kernel/trace/trace.c | 3 |
2 files changed, 35 insertions(+), 211 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 226fd9132d53..07762c08a944 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -25,7 +25,6 @@
 #include <linux/ftrace.h>
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
-#include <linux/hash.h>
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
@@ -189,9 +188,7 @@ static int ftrace_filtered;
 static int tracing_on;
 static int frozen_record_count;
 
-static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
-
-static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
+static LIST_HEAD(ftrace_new_addrs);
 
 static DEFINE_MUTEX(ftrace_regex_lock);
 
@@ -210,8 +207,6 @@ struct ftrace_page {
 static struct ftrace_page *ftrace_pages_start;
 static struct ftrace_page *ftrace_pages;
 
-static int ftrace_record_suspend;
-
 static struct dyn_ftrace *ftrace_free_records;
 
 
@@ -242,72 +237,6 @@ static inline int record_frozen(struct dyn_ftrace *rec)
 # define record_frozen(rec) ({ 0; })
 #endif /* CONFIG_KPROBES */
 
-int skip_trace(unsigned long ip)
-{
-	unsigned long fl;
-	struct dyn_ftrace *rec;
-	struct hlist_node *t;
-	struct hlist_head *head;
-
-	if (frozen_record_count == 0)
-		return 0;
-
-	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
-	hlist_for_each_entry_rcu(rec, t, head, node) {
-		if (rec->ip == ip) {
-			if (record_frozen(rec)) {
-				if (rec->flags & FTRACE_FL_FAILED)
-					return 1;
-
-				if (!(rec->flags & FTRACE_FL_CONVERTED))
-					return 1;
-
-				if (!tracing_on || !ftrace_enabled)
-					return 1;
-
-				if (ftrace_filtered) {
-					fl = rec->flags & (FTRACE_FL_FILTER |
-							   FTRACE_FL_NOTRACE);
-					if (!fl || (fl & FTRACE_FL_NOTRACE))
-						return 1;
-				}
-			}
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static inline int
-ftrace_ip_in_hash(unsigned long ip, unsigned long key)
-{
-	struct dyn_ftrace *p;
-	struct hlist_node *t;
-	int found = 0;
-
-	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
-		if (p->ip == ip) {
-			found = 1;
-			break;
-		}
-	}
-
-	return found;
-}
-
-static inline void
-ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
-{
-	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
-}
-
-/* called from kstop_machine */
-static inline void ftrace_del_hash(struct dyn_ftrace *node)
-{
-	hlist_del(&node->node);
-}
-
 static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
 	rec->ip = (unsigned long)ftrace_free_records;
@@ -362,69 +291,36 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 	}
 
 	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
-		if (!ftrace_pages->next)
-			return NULL;
+		if (!ftrace_pages->next) {
+			/* allocate another page */
+			ftrace_pages->next =
+				(void *)get_zeroed_page(GFP_KERNEL);
+			if (!ftrace_pages->next)
+				return NULL;
+		}
 		ftrace_pages = ftrace_pages->next;
 	}
 
 	return &ftrace_pages->records[ftrace_pages->index++];
 }
 
-static void
+static struct dyn_ftrace *
 ftrace_record_ip(unsigned long ip)
 {
-	struct dyn_ftrace *node;
-	unsigned long key;
-	int resched;
-	int cpu;
+	struct dyn_ftrace *rec;
 
 	if (!ftrace_enabled || ftrace_disabled)
-		return;
-
-	resched = need_resched();
-	preempt_disable_notrace();
-
-	/*
-	 * We simply need to protect against recursion.
-	 * Use the the raw version of smp_processor_id and not
-	 * __get_cpu_var which can call debug hooks that can
-	 * cause a recursive crash here.
-	 */
-	cpu = raw_smp_processor_id();
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
-	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
-		goto out;
-
-	if (unlikely(ftrace_record_suspend))
-		goto out;
-
-	key = hash_long(ip, FTRACE_HASHBITS);
-
-	FTRACE_WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
-
-	if (ftrace_ip_in_hash(ip, key))
-		goto out;
-
-	/* This ip may have hit the hash before the lock */
-	if (ftrace_ip_in_hash(ip, key))
-		goto out;
-
-	node = ftrace_alloc_dyn_node(ip);
-	if (!node)
-		goto out;
+		return NULL;
 
-	node->ip = ip;
+	rec = ftrace_alloc_dyn_node(ip);
+	if (!rec)
+		return NULL;
 
-	ftrace_add_hash(node, key);
+	rec->ip = ip;
 
- out:
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
+	list_add(&rec->list, &ftrace_new_addrs);
 
-	/* prevent recursion with scheduler */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	return rec;
 }
 
 #define FTRACE_ADDR ((long)(ftrace_caller))
@@ -543,7 +439,6 @@ static void ftrace_replace_code(int enable)
 				rec->flags |= FTRACE_FL_FAILED;
 				if ((system_state == SYSTEM_BOOTING) ||
 				    !core_kernel_text(rec->ip)) {
-					ftrace_del_hash(rec);
 					ftrace_free_rec(rec);
 				}
 			}
@@ -551,15 +446,6 @@ static void ftrace_replace_code(int enable)
 	}
 }
 
-static void ftrace_shutdown_replenish(void)
-{
-	if (ftrace_pages->next)
-		return;
-
-	/* allocate another page */
-	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
-}
-
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
 	int i;
@@ -616,18 +502,11 @@ ftrace_code_disable(struct dyn_ftrace *rec)
 	return 1;
 }
 
-static int ftrace_update_code(void *ignore);
-
 static int __ftrace_modify_code(void *data)
 {
 	int *command = data;
 
 	if (*command & FTRACE_ENABLE_CALLS) {
-		/*
-		 * Update any recorded ips now that we have the
-		 * machine stopped
-		 */
-		ftrace_update_code(NULL);
 		ftrace_replace_code(1);
 		tracing_on = 1;
 	} else if (*command & FTRACE_DISABLE_CALLS) {
@@ -738,84 +617,34 @@ static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
-static int ftrace_update_code(void *ignore)
+static int ftrace_update_code(void)
 {
-	int i, save_ftrace_enabled;
+	struct dyn_ftrace *p, *t;
 	cycle_t start, stop;
-	struct dyn_ftrace *p;
-	struct hlist_node *t, *n;
-	struct hlist_head *head, temp_list;
-
-	/* Don't be recording funcs now */
-	ftrace_record_suspend++;
-	save_ftrace_enabled = ftrace_enabled;
-	ftrace_enabled = 0;
 
 	start = ftrace_now(raw_smp_processor_id());
 	ftrace_update_cnt = 0;
 
-	/* No locks needed, the machine is stopped! */
-	for (i = 0; i < FTRACE_HASHSIZE; i++) {
-		INIT_HLIST_HEAD(&temp_list);
-		head = &ftrace_hash[i];
+	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
 
-		/* all CPUS are stopped, we are safe to modify code */
-		hlist_for_each_entry_safe(p, t, n, head, node) {
-			/* Skip over failed records which have not been
-			 * freed. */
-			if (p->flags & FTRACE_FL_FAILED)
-				continue;
+		/* If something went wrong, bail without enabling anything */
+		if (unlikely(ftrace_disabled))
+			return -1;
 
-			/* Unconverted records are always at the head of the
-			 * hash bucket. Once we encounter a converted record,
-			 * simply skip over to the next bucket. Saves ftraced
-			 * some processor cycles (ftrace does its bid for
-			 * global warming :-p ). */
-			if (p->flags & (FTRACE_FL_CONVERTED))
-				break;
+		list_del_init(&p->list);
 
-			/* Ignore updates to this record's mcount site.
-			 * Reintroduce this record at the head of this
-			 * bucket to attempt to "convert" it again if
-			 * the kprobe on it is unregistered before the
-			 * next run. */
-			if (get_kprobe((void *)p->ip)) {
-				ftrace_del_hash(p);
-				INIT_HLIST_NODE(&p->node);
-				hlist_add_head(&p->node, &temp_list);
-				freeze_record(p);
-				continue;
-			} else {
-				unfreeze_record(p);
-			}
-
-			/* convert record (i.e, patch mcount-call with NOP) */
-			if (ftrace_code_disable(p)) {
-				p->flags |= FTRACE_FL_CONVERTED;
-				ftrace_update_cnt++;
-			} else {
-				if ((system_state == SYSTEM_BOOTING) ||
-				    !core_kernel_text(p->ip)) {
-					ftrace_del_hash(p);
-					ftrace_free_rec(p);
-				}
-			}
-		}
-
-		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
-			hlist_del(&p->node);
-			INIT_HLIST_NODE(&p->node);
-			hlist_add_head(&p->node, head);
-		}
+		/* convert record (i.e, patch mcount-call with NOP) */
+		if (ftrace_code_disable(p)) {
+			p->flags |= FTRACE_FL_CONVERTED;
+			ftrace_update_cnt++;
+		} else
+			ftrace_free_rec(p);
 	}
 
 	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
 	ftrace_update_tot_cnt += ftrace_update_cnt;
 
-	ftrace_enabled = save_ftrace_enabled;
-	ftrace_record_suspend--;
-
 	return 0;
 }
 
@@ -847,7 +676,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 	pg = ftrace_pages = ftrace_pages_start;
 
 	cnt = num_to_init / ENTRIES_PER_PAGE;
-	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
+	pr_info("ftrace: allocating %ld entries in %d pages\n",
 		num_to_init, cnt);
 
 	for (i = 0; i < cnt; i++) {
@@ -1451,20 +1280,18 @@ static int ftrace_convert_nops(unsigned long *start,
 	unsigned long addr;
 	unsigned long flags;
 
+	mutex_lock(&ftrace_start_lock);
 	p = start;
 	while (p < end) {
 		addr = ftrace_call_adjust(*p++);
-		/* should not be called from interrupt context */
-		spin_lock(&ftrace_lock);
 		ftrace_record_ip(addr);
-		spin_unlock(&ftrace_lock);
-		ftrace_shutdown_replenish();
 	}
 
-	/* p is ignored */
+	/* disable interrupts to prevent kstop machine */
 	local_irq_save(flags);
-	ftrace_update_code(p);
+	ftrace_update_code();
 	local_irq_restore(flags);
+	mutex_unlock(&ftrace_start_lock);
 
 	return 0;
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 333a5162149b..06951e229443 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -865,9 +865,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!ftrace_function_enabled))
 		return;
 
-	if (skip_trace(ip))
-		return;
-
 	pc = preempt_count();
 	resched = need_resched();
 	preempt_disable_notrace();