author		Steven Rostedt <rostedt@goodmis.org>	2008-10-23 09:33:05 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-23 10:00:22 -0400
commit		cb7be3b2fc2cf089ee52b16f0fd9ebb29e9944e1
tree		4d5e4c1d1211ee7be1a583a9f9c4b33b7d4ce2ee /kernel/trace
parent		07c4cc1cdaa08fcb6c0275dd7be49eae37260169
ftrace: remove daemon
The ftrace daemon is complex and error prone. This patch strips it out
of the code.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
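In place of the per-second daemon and its kstop_machine cycles, the patch takes a reference count under a mutex and patches the mcount call sites synchronously from the register/unregister paths. Below is a minimal sketch of that pattern, condensed from the ftrace_startup()/ftrace_shutdown() hunks in this diff; it is illustrative only (the saved_ftrace_func bookkeeping, the ftrace_disabled early return and the goto-based exit paths are elided):

	/* Sketch only: condensed from the hunks below, not the exact kernel code. */
	static DEFINE_MUTEX(ftrace_start_lock);
	static int ftrace_start;

	static void ftrace_startup(void)
	{
		int command = 0;

		mutex_lock(&ftrace_start_lock);
		ftrace_start++;
		if (ftrace_start == 1)		/* first user: enable the calls */
			command |= FTRACE_ENABLE_CALLS;
		if (command)
			ftrace_run_update_code(command); /* stop_machine() based patching */
		mutex_unlock(&ftrace_start_lock);
	}

	static void ftrace_shutdown(void)
	{
		int command = 0;

		mutex_lock(&ftrace_start_lock);
		ftrace_start--;
		if (!ftrace_start)		/* last user: disable them again */
			command |= FTRACE_DISABLE_CALLS;
		if (command)
			ftrace_run_update_code(command);
		mutex_unlock(&ftrace_start_lock);
	}

With this in place, ftrace_force_update(), the ftraced_enabled debugfs file and the selftest hooks that waited on the daemon become unnecessary and are removed below.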
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c		322
-rw-r--r--	kernel/trace/trace_selftest.c	 14
2 files changed, 28 insertions, 308 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b08996ca561d..e758cab0836f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -165,25 +165,8 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
-/*
- * The hash lock is only needed when the recording of the mcount
- * callers are dynamic. That is, by the caller themselves and
- * not recorded via the compilation.
- */
-static DEFINE_SPINLOCK(ftrace_hash_lock);
-#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
-#define ftrace_hash_unlock(flags) \
-			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
-static void ftrace_release_hash(unsigned long start, unsigned long end);
-#else
-/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
-#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
-#define ftrace_hash_unlock(flags) do { } while(0)
-static inline void ftrace_release_hash(unsigned long start, unsigned long end)
-{
-}
+# error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
 /*
@@ -194,8 +177,6 @@ static inline void ftrace_release_hash(unsigned long start, unsigned long end)
  */
 static unsigned long mcount_addr = MCOUNT_ADDR;
 
-static struct task_struct *ftraced_task;
-
 enum {
 	FTRACE_ENABLE_CALLS = (1 << 0),
 	FTRACE_DISABLE_CALLS = (1 << 1),
@@ -212,7 +193,6 @@ static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
 
 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
 
-static DEFINE_MUTEX(ftraced_lock);
 static DEFINE_MUTEX(ftrace_regex_lock);
 
 struct ftrace_page {
@@ -230,10 +210,6 @@ struct ftrace_page {
 static struct ftrace_page *ftrace_pages_start;
 static struct ftrace_page *ftrace_pages;
 
-static int ftraced_trigger;
-static int ftraced_suspend;
-static int ftraced_stop;
-
 static int ftrace_record_suspend;
 
 static struct dyn_ftrace *ftrace_free_records;
@@ -398,7 +374,6 @@ static void
 ftrace_record_ip(unsigned long ip)
 {
 	struct dyn_ftrace *node;
-	unsigned long flags;
 	unsigned long key;
 	int resched;
 	int cpu;
@@ -430,24 +405,18 @@ ftrace_record_ip(unsigned long ip)
 	if (ftrace_ip_in_hash(ip, key))
 		goto out;
 
-	ftrace_hash_lock(flags);
-
 	/* This ip may have hit the hash before the lock */
 	if (ftrace_ip_in_hash(ip, key))
-		goto out_unlock;
+		goto out;
 
 	node = ftrace_alloc_dyn_node(ip);
 	if (!node)
-		goto out_unlock;
+		goto out;
 
 	node->ip = ip;
 
 	ftrace_add_hash(node, key);
 
-	ftraced_trigger = 1;
-
- out_unlock:
-	ftrace_hash_unlock(flags);
  out:
 	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
 
@@ -647,7 +616,7 @@ ftrace_code_disable(struct dyn_ftrace *rec)
 	return 1;
 }
 
-static int __ftrace_update_code(void *ignore);
+static int ftrace_update_code(void *ignore);
 
 static int __ftrace_modify_code(void *data)
 {
@@ -659,7 +628,7 @@ static int __ftrace_modify_code(void *data)
 		 * Update any recorded ips now that we have the
 		 * machine stopped
 		 */
-		__ftrace_update_code(NULL);
+		ftrace_update_code(NULL);
 		ftrace_replace_code(1);
 		tracing_on = 1;
 	} else if (*command & FTRACE_DISABLE_CALLS) {
@@ -686,26 +655,9 @@ static void ftrace_run_update_code(int command)
 	stop_machine(__ftrace_modify_code, &command, NULL);
 }
 
-void ftrace_disable_daemon(void)
-{
-	/* Stop the daemon from calling kstop_machine */
-	mutex_lock(&ftraced_lock);
-	ftraced_stop = 1;
-	mutex_unlock(&ftraced_lock);
-
-	ftrace_force_update();
-}
-
-void ftrace_enable_daemon(void)
-{
-	mutex_lock(&ftraced_lock);
-	ftraced_stop = 0;
-	mutex_unlock(&ftraced_lock);
-
-	ftrace_force_update();
-}
-
 static ftrace_func_t saved_ftrace_func;
+static int ftrace_start;
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static void ftrace_startup(void)
 {
@@ -714,9 +666,9 @@ static void ftrace_startup(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	ftraced_suspend++;
-	if (ftraced_suspend == 1)
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start++;
+	if (ftrace_start == 1)
 		command |= FTRACE_ENABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -729,7 +681,7 @@ static void ftrace_startup(void)
 
 	ftrace_run_update_code(command);
  out:
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_shutdown(void)
@@ -739,9 +691,9 @@ static void ftrace_shutdown(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	ftraced_suspend--;
-	if (!ftraced_suspend)
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start--;
+	if (!ftrace_start)
 		command |= FTRACE_DISABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -754,7 +706,7 @@ static void ftrace_shutdown(void)
 
 	ftrace_run_update_code(command);
  out:
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_startup_sysctl(void)
@@ -764,15 +716,15 @@ static void ftrace_startup_sysctl(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
+	mutex_lock(&ftrace_start_lock);
 	/* Force update next time */
 	saved_ftrace_func = NULL;
-	/* ftraced_suspend is true if we want ftrace running */
-	if (ftraced_suspend)
+	/* ftrace_start is true if we want ftrace running */
+	if (ftrace_start)
 		command |= FTRACE_ENABLE_CALLS;
 
 	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_shutdown_sysctl(void)
@@ -782,20 +734,20 @@ static void ftrace_shutdown_sysctl(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	/* ftraced_suspend is true if ftrace is running */
-	if (ftraced_suspend)
+	mutex_lock(&ftrace_start_lock);
+	/* ftrace_start is true if ftrace is running */
+	if (ftrace_start)
 		command |= FTRACE_DISABLE_CALLS;
 
 	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
-static int __ftrace_update_code(void *ignore)
+static int ftrace_update_code(void *ignore)
 {
 	int i, save_ftrace_enabled;
 	cycle_t start, stop;
@@ -869,7 +821,6 @@ static int __ftrace_update_code(void *ignore)
 	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
 	ftrace_update_tot_cnt += ftrace_update_cnt;
-	ftraced_trigger = 0;
 
 	ftrace_enabled = save_ftrace_enabled;
 	ftrace_record_suspend--;
@@ -877,17 +828,6 @@ static int __ftrace_update_code(void *ignore)
 	return 0;
 }
 
-static int ftrace_update_code(void)
-{
-	if (unlikely(ftrace_disabled) ||
-	    !ftrace_enabled || !ftraced_trigger)
-		return 0;
-
-	stop_machine(__ftrace_update_code, NULL, NULL);
-
-	return 1;
-}
-
 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 {
 	struct ftrace_page *pg;
@@ -1425,10 +1365,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 	}
 
 	mutex_lock(&ftrace_sysctl_lock);
-	mutex_lock(&ftraced_lock);
-	if (iter->filtered && ftraced_suspend && ftrace_enabled)
+	mutex_lock(&ftrace_start_lock);
+	if (iter->filtered && ftrace_start && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	kfree(iter);
@@ -1448,55 +1388,6 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
 	return ftrace_regex_release(inode, file, 0);
 }
 
-static ssize_t
-ftraced_read(struct file *filp, char __user *ubuf,
-	     size_t cnt, loff_t *ppos)
-{
-	/* don't worry about races */
-	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
-	int r = strlen(buf);
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-ftraced_write(struct file *filp, const char __user *ubuf,
-	      size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	long val;
-	int ret;
-
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	if (strncmp(buf, "enable", 6) == 0)
-		val = 1;
-	else if (strncmp(buf, "disable", 7) == 0)
-		val = 0;
-	else {
-		buf[cnt] = 0;
-
-		ret = strict_strtoul(buf, 10, &val);
-		if (ret < 0)
-			return ret;
-
-		val = !!val;
-	}
-
-	if (val)
-		ftrace_enable_daemon();
-	else
-		ftrace_disable_daemon();
-
-	filp->f_pos += cnt;
-
-	return cnt;
-}
-
 static struct file_operations ftrace_avail_fops = {
 	.open = ftrace_avail_open,
 	.read = seq_read,
@@ -1527,38 +1418,6 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static struct file_operations ftraced_fops = {
-	.open = tracing_open_generic,
-	.read = ftraced_read,
-	.write = ftraced_write,
-};
-
-/**
- * ftrace_force_update - force an update to all recording ftrace functions
- */
-int ftrace_force_update(void)
-{
-	int ret = 0;
-
-	if (unlikely(ftrace_disabled))
-		return -ENODEV;
-
-	mutex_lock(&ftrace_sysctl_lock);
-	mutex_lock(&ftraced_lock);
-
-	/*
-	 * If ftraced_trigger is not set, then there is nothing
-	 * to update.
-	 */
-	if (ftraced_trigger && !ftrace_update_code())
-		ret = -EBUSY;
-
-	mutex_unlock(&ftraced_lock);
-	mutex_unlock(&ftrace_sysctl_lock);
-
-	return ret;
-}
-
 static __init int ftrace_init_debugfs(void)
 {
 	struct dentry *d_tracer;
@@ -1589,17 +1448,11 @@ static __init int ftrace_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'set_ftrace_notrace' entry\n");
 
-	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
-				    NULL, &ftraced_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'ftraced_enabled' entry\n");
 	return 0;
 }
 
 fs_initcall(ftrace_init_debugfs);
 
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
 static int ftrace_convert_nops(unsigned long *start,
 			       unsigned long *end)
 {
@@ -1619,7 +1472,7 @@ static int ftrace_convert_nops(unsigned long *start,
 
 	/* p is ignored */
 	local_irq_save(flags);
-	__ftrace_update_code(p);
+	ftrace_update_code(p);
 	local_irq_restore(flags);
 
 	return 0;
@@ -1666,122 +1519,6 @@ void __init ftrace_init(void)
  failed:
 	ftrace_disabled = 1;
 }
-#else /* CONFIG_FTRACE_MCOUNT_RECORD */
-
-static void ftrace_release_hash(unsigned long start, unsigned long end)
-{
-	struct dyn_ftrace *rec;
-	struct hlist_node *t, *n;
-	struct hlist_head *head, temp_list;
-	unsigned long flags;
-	int i, cpu;
-
-	preempt_disable_notrace();
-
-	/* disable incase we call something that calls mcount */
-	cpu = raw_smp_processor_id();
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
-
-	ftrace_hash_lock(flags);
-
-	for (i = 0; i < FTRACE_HASHSIZE; i++) {
-		INIT_HLIST_HEAD(&temp_list);
-		head = &ftrace_hash[i];
-
-		/* all CPUS are stopped, we are safe to modify code */
-		hlist_for_each_entry_safe(rec, t, n, head, node) {
-			if (rec->flags & FTRACE_FL_FREE)
-				continue;
-
-			if ((rec->ip >= start) && (rec->ip < end))
-				ftrace_free_rec(rec);
-		}
-	}
-
-	ftrace_hash_unlock(flags);
-
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
-	preempt_enable_notrace();
-
-}
-
-static int ftraced(void *ignore)
-{
-	unsigned long usecs;
-
-	while (!kthread_should_stop()) {
-
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		/* check once a second */
-		schedule_timeout(HZ);
-
-		if (unlikely(ftrace_disabled))
-			continue;
-
-		mutex_lock(&ftrace_sysctl_lock);
-		mutex_lock(&ftraced_lock);
-		if (!ftraced_suspend && !ftraced_stop &&
-		    ftrace_update_code()) {
-			usecs = nsecs_to_usecs(ftrace_update_time);
-			if (ftrace_update_tot_cnt > 100000) {
-				ftrace_update_tot_cnt = 0;
-				pr_info("hm, dftrace overflow: %lu change%s"
-					" (%lu total) in %lu usec%s\n",
-					ftrace_update_cnt,
-					ftrace_update_cnt != 1 ? "s" : "",
-					ftrace_update_tot_cnt,
-					usecs, usecs != 1 ? "s" : "");
-				FTRACE_WARN_ON_ONCE(1);
-			}
-		}
-		mutex_unlock(&ftraced_lock);
-		mutex_unlock(&ftrace_sysctl_lock);
-
-		ftrace_shutdown_replenish();
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
-}
-
-static int __init ftrace_dynamic_init(void)
-{
-	struct task_struct *p;
-	unsigned long addr;
-	int ret;
-
-	addr = (unsigned long)ftrace_record_ip;
-
-	stop_machine(ftrace_dyn_arch_init, &addr, NULL);
-
-	/* ftrace_dyn_arch_init places the return code in addr */
-	if (addr) {
-		ret = (int)addr;
-		goto failed;
-	}
-
-	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
-	if (ret)
-		goto failed;
-
-	p = kthread_run(ftraced, NULL, "ftraced");
-	if (IS_ERR(p)) {
-		ret = -1;
-		goto failed;
-	}
-
-	last_ftrace_enabled = ftrace_enabled = 1;
-	ftraced_task = p;
-
-	return 0;
-
- failed:
-	ftrace_disabled = 1;
-	return ret;
-}
-
-core_initcall(ftrace_dynamic_init);
-#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
 
 #else
 # define ftrace_startup() do { } while (0)
@@ -1801,9 +1538,6 @@ void ftrace_kill(void)
 {
 	ftrace_disabled = 1;
 	ftrace_enabled = 0;
-#ifdef CONFIG_DYNAMIC_FTRACE
-	ftraced_suspend = -1;
-#endif
 	clear_ftrace_function();
 }
 
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 95815d26a041..90bc752a7580 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -99,13 +99,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	/* passed in by parameter to fool gcc from optimizing */
 	func();
 
-	/* update the records */
-	ret = ftrace_force_update();
-	if (ret) {
-		printk(KERN_CONT ".. ftraced failed .. ");
-		return ret;
-	}
-
 	/*
 	 * Some archs *cough*PowerPC*cough* add charachters to the
 	 * start of the function names. We simply put a '*' to
@@ -183,13 +176,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	/* make sure msleep has been recorded */
 	msleep(1);
 
-	/* force the recorded functions to be traced */
-	ret = ftrace_force_update();
-	if (ret) {
-		printk(KERN_CONT ".. ftraced failed .. ");
-		return ret;
-	}
-
 	/* start the tracing */
 	ftrace_enabled = 1;
 	tracer_enabled = 1;