 include/linux/ftrace.h |  31
 kernel/trace/ftrace.c  | 253
 2 files changed, 246 insertions(+), 38 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 26eafcef75be..4f0b6fec379d 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -133,6 +133,8 @@ struct ftrace_func_command {
 int ftrace_arch_code_modify_prepare(void);
 int ftrace_arch_code_modify_post_process(void);
 
+void ftrace_bug(int err, unsigned long ip);
+
 struct seq_file;
 
 struct ftrace_probe_ops {
@@ -190,6 +192,35 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
 int register_ftrace_command(struct ftrace_func_command *cmd);
 int unregister_ftrace_command(struct ftrace_func_command *cmd);
 
+enum {
+	FTRACE_UPDATE_CALLS = (1 << 0),
+	FTRACE_DISABLE_CALLS = (1 << 1),
+	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+	FTRACE_START_FUNC_RET = (1 << 3),
+	FTRACE_STOP_FUNC_RET = (1 << 4),
+};
+
+enum {
+	FTRACE_UPDATE_IGNORE,
+	FTRACE_UPDATE_MAKE_CALL,
+	FTRACE_UPDATE_MAKE_NOP,
+};
+
+void arch_ftrace_update_code(int command);
+
+struct ftrace_rec_iter;
+
+struct ftrace_rec_iter *ftrace_rec_iter_start(void);
+struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
+struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
+
+int ftrace_update_record(struct dyn_ftrace *rec, int enable);
+int ftrace_test_record(struct dyn_ftrace *rec, int enable);
+void ftrace_run_stop_machine(int command);
+int ftrace_location(unsigned long ip);
+
+extern ftrace_func_t ftrace_trace_function;
+
 /* defined in arch */
 extern int ftrace_ip_converted(unsigned long ip);
 extern int ftrace_dyn_arch_init(void *data);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 25b4f4da0fe8..655b432fb890 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -947,13 +947,6 @@ struct ftrace_func_probe {
 	struct rcu_head rcu;
 };
 
-enum {
-	FTRACE_UPDATE_CALLS = (1 << 0),
-	FTRACE_DISABLE_CALLS = (1 << 1),
-	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
-	FTRACE_START_FUNC_RET = (1 << 3),
-	FTRACE_STOP_FUNC_RET = (1 << 4),
-};
 struct ftrace_func_entry {
 	struct hlist_node hlist;
 	unsigned long ip;
@@ -1307,6 +1300,28 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 		}				\
 	}
 
+/**
+ * ftrace_location - return true if the ip given is a traced location
+ * @ip: the instruction pointer to check
+ *
+ * Returns 1 if the @ip given is a pointer to an ftrace location.
+ * That is, the instruction that is either a NOP or a call to
+ * the function tracer. It checks the ftrace internal tables to
+ * determine if the address belongs to a traced function or not.
+ */
+int ftrace_location(unsigned long ip)
+{
+	struct ftrace_page *pg;
+	struct dyn_ftrace *rec;
+
+	do_for_each_ftrace_rec(pg, rec) {
+		if (rec->ip == ip)
+			return 1;
+	} while_for_each_ftrace_rec();
+
+	return 0;
+}
+
 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 				     int filter_hash,
 				     bool inc)
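
The hunk above exports ftrace_location() so other kernel code can ask whether an address is a traced call site. As a usage sketch that is not part of this patch (my_arch_prepare_patch() is a hypothetical name), arch code could use it to reject addresses that ftrace does not manage:

#include <linux/ftrace.h>

/* Illustrative only: refuse to patch an address that is not a known
 * mcount/ftrace call site. my_arch_prepare_patch() is a made-up helper. */
static int my_arch_prepare_patch(unsigned long ip)
{
	if (!ftrace_location(ip))
		return -EINVAL;	/* not an ftrace-managed location */
	return 0;
}
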
@@ -1475,7 +1490,19 @@ static void print_ip_ins(const char *fmt, unsigned char *p)
 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
 }
 
-static void ftrace_bug(int failed, unsigned long ip)
+/**
+ * ftrace_bug - report and shutdown function tracer
+ * @failed: The failed type (EFAULT, EINVAL, EPERM)
+ * @ip: The address that failed
+ *
+ * The arch code that enables or disables function tracing
+ * can call ftrace_bug() when it has detected a problem in
+ * modifying the code. @failed should be one of either:
+ * EFAULT - if the problem happens on reading the @ip address
+ * EINVAL - if what is read at @ip is not what was expected
+ * EPERM - if the problem happens on writing to the @ip address
+ */
+void ftrace_bug(int failed, unsigned long ip)
 {
 	switch (failed) {
 	case -EFAULT:
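
With ftrace_bug() now made global, an arch's own code-modification path can report failures directly. A hedged sketch, not taken from this patch; my_arch_modify_site() and my_arch_patch_site() are assumptions:

/* Illustrative only: report a failed text modification using the
 * error codes documented above (-EFAULT, -EINVAL, -EPERM). */
static int my_arch_modify_site(struct dyn_ftrace *rec, int enable)
{
	int ret;

	ret = my_arch_patch_site(rec, enable);	/* hypothetical helper */
	if (ret)
		ftrace_bug(ret, rec->ip);	/* report why it failed */
	return ret;
}
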
@@ -1517,15 +1544,10 @@ int ftrace_text_reserved(void *start, void *end)
 	return 0;
 }
 
-
-static int
-__ftrace_replace_code(struct dyn_ftrace *rec, int update)
+static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 {
-	unsigned long ftrace_addr;
 	unsigned long flag = 0UL;
 
-	ftrace_addr = (unsigned long)FTRACE_ADDR;
-
 	/*
 	 * If we are updating calls:
 	 *
@@ -1537,20 +1559,74 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int update)
 	 * If we are disabling calls, then disable all records that
 	 * are enabled.
 	 */
-	if (update && (rec->flags & ~FTRACE_FL_MASK))
+	if (enable && (rec->flags & ~FTRACE_FL_MASK))
 		flag = FTRACE_FL_ENABLED;
 
 	/* If the state of this record hasn't changed, then do nothing */
 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
-		return 0;
+		return FTRACE_UPDATE_IGNORE;
 
 	if (flag) {
-		rec->flags |= FTRACE_FL_ENABLED;
+		if (update)
+			rec->flags |= FTRACE_FL_ENABLED;
+		return FTRACE_UPDATE_MAKE_CALL;
+	}
+
+	if (update)
+		rec->flags &= ~FTRACE_FL_ENABLED;
+
+	return FTRACE_UPDATE_MAKE_NOP;
+}
+
+/**
+ * ftrace_update_record - set a record that is now tracing or not
+ * @rec: the record to update
+ * @enable: set to 1 if the record is tracing, zero to force disable
+ *
+ * The records that represent all functions that can be traced need
+ * to be updated when tracing has been enabled.
+ */
+int ftrace_update_record(struct dyn_ftrace *rec, int enable)
+{
+	return ftrace_check_record(rec, enable, 1);
+}
+
+/**
+ * ftrace_test_record - check if the record has been enabled or not
+ * @rec: the record to test
+ * @enable: set to 1 to check if enabled, 0 if it is disabled
+ *
+ * The arch code may need to test if a record is already set to
+ * tracing to determine how to modify the function code that it
+ * represents.
+ */
+int ftrace_test_record(struct dyn_ftrace *rec, int enable)
+{
+	return ftrace_check_record(rec, enable, 0);
+}
+
+static int
+__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+{
+	unsigned long ftrace_addr;
+	int ret;
+
+	ftrace_addr = (unsigned long)FTRACE_ADDR;
+
+	ret = ftrace_update_record(rec, enable);
+
+	switch (ret) {
+	case FTRACE_UPDATE_IGNORE:
+		return 0;
+
+	case FTRACE_UPDATE_MAKE_CALL:
 		return ftrace_make_call(rec, ftrace_addr);
+
+	case FTRACE_UPDATE_MAKE_NOP:
+		return ftrace_make_nop(NULL, rec, ftrace_addr);
 	}
 
-	rec->flags &= ~FTRACE_FL_ENABLED;
-	return ftrace_make_nop(NULL, rec, ftrace_addr);
+	return -1; /* unknown ftrace bug */
 }
 
 static void ftrace_replace_code(int update)
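
ftrace_test_record() deliberately mirrors ftrace_update_record() but leaves rec->flags untouched, so arch code can ask what a site is about to become before committing anything. A small sketch under that assumption (the helper name is invented):

/* Illustrative only: MAKE_NOP means the site currently holds a call
 * to the tracer; MAKE_CALL means it currently holds a NOP. */
static int my_arch_site_holds_call(struct dyn_ftrace *rec, int enable)
{
	return ftrace_test_record(rec, enable) == FTRACE_UPDATE_MAKE_NOP;
}
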
@@ -1576,6 +1652,78 @@ static void ftrace_replace_code(int update)
 	} while_for_each_ftrace_rec();
 }
 
+struct ftrace_rec_iter {
+	struct ftrace_page *pg;
+	int index;
+};
+
+/**
+ * ftrace_rec_iter_start - start up iterating over traced functions
+ *
+ * Returns an iterator handle that is used to iterate over all
+ * the records that represent address locations where functions
+ * are traced.
+ *
+ * May return NULL if no records are available.
+ */
+struct ftrace_rec_iter *ftrace_rec_iter_start(void)
+{
+	/*
+	 * We only use a single iterator.
+	 * Protected by the ftrace_lock mutex.
+	 */
+	static struct ftrace_rec_iter ftrace_rec_iter;
+	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
+
+	iter->pg = ftrace_pages_start;
+	iter->index = 0;
+
+	/* Could have empty pages */
+	while (iter->pg && !iter->pg->index)
+		iter->pg = iter->pg->next;
+
+	if (!iter->pg)
+		return NULL;
+
+	return iter;
+}
+
+/**
+ * ftrace_rec_iter_next - get the next record to process
+ * @iter: The handle to the iterator.
+ *
+ * Returns the next iterator after the given iterator @iter.
+ */
+struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
+{
+	iter->index++;
+
+	if (iter->index >= iter->pg->index) {
+		iter->pg = iter->pg->next;
+		iter->index = 0;
+
+		/* Could have empty pages */
+		while (iter->pg && !iter->pg->index)
+			iter->pg = iter->pg->next;
+	}
+
+	if (!iter->pg)
+		return NULL;
+
+	return iter;
+}
+
+/**
+ * ftrace_rec_iter_record - get the record at the iterator location
+ * @iter: The current iterator location
+ *
+ * Returns the record that the current @iter is at.
+ */
+struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
+{
+	return &iter->pg->records[iter->index];
+}
+
 static int
 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
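
Taken together, the record iterator and ftrace_update_record() let an arch walk every traced location and decide how to patch it without touching the ftrace page list directly. A minimal sketch, assuming hypothetical my_arch_make_call()/my_arch_make_nop() helpers:

#include <linux/ftrace.h>

/* Illustrative only: patch every call site according to what ftrace
 * says it should become. */
static void my_arch_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_update_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
			continue;
		case FTRACE_UPDATE_MAKE_CALL:
			my_arch_make_call(rec->ip);	/* hypothetical */
			break;
		case FTRACE_UPDATE_MAKE_NOP:
			my_arch_make_nop(rec->ip);	/* hypothetical */
			break;
		}
	}
}
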
@@ -1617,12 +1765,6 @@ static int __ftrace_modify_code(void *data)
 {
 	int *command = data;
 
-	/*
-	 * Do not call function tracer while we update the code.
-	 * We are in stop machine, no worrying about races.
-	 */
-	function_trace_stop++;
-
 	if (*command & FTRACE_UPDATE_CALLS)
 		ftrace_replace_code(1);
 	else if (*command & FTRACE_DISABLE_CALLS)
@@ -1636,21 +1778,33 @@ static int __ftrace_modify_code(void *data)
 	else if (*command & FTRACE_STOP_FUNC_RET)
 		ftrace_disable_ftrace_graph_caller();
 
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	/*
-	 * For archs that call ftrace_test_stop_func(), we must
-	 * wait till after we update all the function callers
-	 * before we update the callback. This keeps different
-	 * ops that record different functions from corrupting
-	 * each other.
-	 */
-	__ftrace_trace_function = __ftrace_trace_function_delay;
-#endif
-	function_trace_stop--;
-
 	return 0;
 }
 
+/**
+ * ftrace_run_stop_machine - go back to the stop machine method
+ * @command: The command to tell ftrace what to do
+ *
+ * If an arch needs to fall back to the stop machine method, then
+ * it can call this function.
+ */
+void ftrace_run_stop_machine(int command)
+{
+	stop_machine(__ftrace_modify_code, &command, NULL);
+}
+
+/**
+ * arch_ftrace_update_code - modify the code to trace or not trace
+ * @command: The command that needs to be done
+ *
+ * Archs can override this function if they do not need to
+ * run stop_machine() to modify code.
+ */
+void __weak arch_ftrace_update_code(int command)
+{
+	ftrace_run_stop_machine(command);
+}
+
 static void ftrace_run_update_code(int command)
 {
 	int ret;
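
The weak arch_ftrace_update_code() is the override point this patch introduces: an arch that can patch its text safely without stopping every CPU provides its own version, keeping ftrace_run_stop_machine() as the fallback. A hedged sketch; my_arch_can_patch_live() and my_arch_patch_code() are invented names:

/* Illustrative only: patch directly when the arch can do so safely,
 * otherwise fall back to the stop_machine() method. */
void arch_ftrace_update_code(int command)
{
	if (my_arch_can_patch_live())		/* hypothetical */
		my_arch_patch_code(command);	/* hypothetical */
	else
		ftrace_run_stop_machine(command);
}
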
@@ -1659,8 +1813,31 @@ static void ftrace_run_update_code(int command)
 	FTRACE_WARN_ON(ret);
 	if (ret)
 		return;
+	/*
+	 * Do not call function tracer while we update the code.
+	 * We are in stop machine.
+	 */
+	function_trace_stop++;
 
-	stop_machine(__ftrace_modify_code, &command, NULL);
+	/*
+	 * By default we use stop_machine() to modify the code.
+	 * But archs can do whatever they want as long as it
+	 * is safe. stop_machine() is the safest, but also
+	 * produces the most overhead.
+	 */
+	arch_ftrace_update_code(command);
+
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	/*
+	 * For archs that call ftrace_test_stop_func(), we must
+	 * wait till after we update all the function callers
+	 * before we update the callback. This keeps different
+	 * ops that record different functions from corrupting
+	 * each other.
+	 */
+	__ftrace_trace_function = __ftrace_trace_function_delay;
+#endif
+	function_trace_stop--;
 
 	ret = ftrace_arch_code_modify_post_process();
 	FTRACE_WARN_ON(ret);