path: root/kernel/trace
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 14:50:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 14:50:00 -0400
commit	b8c0aa46b3e86083721b57ed2eec6bd2c29ebfba (patch)
tree	45e349bf8a14aa99279d323fdc515e849fd349f3 /kernel/trace
parent	c7ed326fa7cafb83ced5a8b02517a61672fe9e90 (diff)
parent	dc6f03f26f570104a2bb03f9d1deb588026d7c75 (diff)
Merge tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "This pull request has a lot of work done. The main thing is the
  changes to the ftrace function callback infrastructure. It introduces
  a way to allow different functions to call directly different
  trampolines instead of all calling the same "mcount" one.

  The only user of this for now is the function graph tracer, which
  always had a different trampoline, but the function tracer trampoline
  was called and did basically nothing, and then the function graph
  tracer trampoline was called. The difference now is that the function
  graph tracer trampoline can be called directly if a function is only
  being traced by the function graph trampoline. If function tracing is
  also happening on the same function, the old way is still done.

  The accounting for this takes up more memory when function graph
  tracing is activated, as it needs to keep track of which functions it
  uses. I have a new way that won't take as much memory, but it's not
  ready yet for this merge window, and will have to wait for the next
  one.

  Another big change was the removal of the ftrace_start/stop() calls
  that were used by the suspend/resume code, which stopped function
  tracing when entering the suspend and resume paths. The stop of
  ftrace was done because there was some function that would crash the
  system if one called smp_processor_id()! The stop/start was a big
  hammer to solve the issue at the time, which was when ftrace was
  first introduced into Linux. Now ftrace has better infrastructure to
  debug such issues, and I found the problem function and labeled it
  with "notrace", so function tracing can now safely be activated all
  the way down into the guts of suspend and resume.

  Other changes include clean ups of uprobe code, clean up of the
  trace_seq() code, and other various small fixes and clean ups to
  ftrace and tracing"

* tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits)
  ftrace: Add warning if tramp hash does not match nr_trampolines
  ftrace: Fix trampoline hash update check on rec->flags
  ring-buffer: Use rb_page_size() instead of open coded head_page size
  ftrace: Rename ftrace_ops field from trampolines to nr_trampolines
  tracing: Convert local function_graph functions to static
  ftrace: Do not copy old hash when resetting
  tracing: let user specify tracing_thresh after selecting function_graph
  ring-buffer: Always run per-cpu ring buffer resize with schedule_work_on()
  tracing: Remove function_trace_stop and HAVE_FUNCTION_TRACE_MCOUNT_TEST
  s390/ftrace: remove check of obsolete variable function_trace_stop
  arm64, ftrace: Remove check of obsolete variable function_trace_stop
  Blackfin: ftrace: Remove check of obsolete variable function_trace_stop
  metag: ftrace: Remove check of obsolete variable function_trace_stop
  microblaze: ftrace: Remove check of obsolete variable function_trace_stop
  MIPS: ftrace: Remove check of obsolete variable function_trace_stop
  parisc: ftrace: Remove check of obsolete variable function_trace_stop
  sh: ftrace: Remove check of obsolete variable function_trace_stop
  sparc64,ftrace: Remove check of obsolete variable function_trace_stop
  tile: ftrace: Remove check of obsolete variable function_trace_stop
  ftrace: x86: Remove check of obsolete variable function_trace_stop
  ...
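To illustrate the trampoline change described above: with this series, a
callsite that has exactly one callback attached, and whose ftrace_ops
supplies its own trampoline, is patched to call that trampoline directly;
as soon as a second callback attaches, the callsite falls back to the
shared list-walking trampoline. Below is a minimal userspace sketch of
that dispatch decision, not kernel code; every name in it is invented for
illustration:

    /* Userspace model of the per-ops trampoline dispatch decision. */
    #include <stdio.h>

    #define MAX_OPS 4

    /* One registered tracer callback, optionally with a private trampoline. */
    struct ops {
        void (*func)(const char *callsite);
        void (*trampoline)(const char *callsite); /* NULL: use the shared loop */
    };

    static struct ops *registered[MAX_OPS];
    static int nr_ops;

    /* The shared "mcount" path: iterate over every registered ops. */
    static void shared_trampoline(const char *callsite)
    {
        for (int i = 0; i < nr_ops; i++)
            registered[i]->func(callsite);
    }

    /* Models the rec->flags accounting: pick the call target for a site. */
    static void (*resolve_call_target(void))(const char *)
    {
        /* Only one callback, and it has a private trampoline: call it directly. */
        if (nr_ops == 1 && registered[0]->trampoline)
            return registered[0]->trampoline;
        /* Otherwise fall back to the shared loop, as before the change. */
        return shared_trampoline;
    }

    static void graph_cb(const char *cs)    { printf("graph cb at %s\n", cs); }
    static void graph_tramp(const char *cs) { printf("graph trampoline at %s\n", cs); graph_cb(cs); }
    static void func_cb(const char *cs)     { printf("function cb at %s\n", cs); }

    int main(void)
    {
        struct ops graph = { graph_cb, graph_tramp };
        struct ops function = { func_cb, NULL };

        registered[nr_ops++] = &graph;
        resolve_call_target()("foo");   /* direct: private graph trampoline */

        registered[nr_ops++] = &function;
        resolve_call_target()("foo");   /* shared loop: both callbacks run */
        return 0;
    }

The first call goes straight to the private trampoline and the second goes
through the shared loop, mirroring the FTRACE_FL_TRAMP accounting that
__ftrace_hash_rec_update() does in the patch below.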
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Kconfig	5
-rw-r--r--	kernel/trace/Makefile	1
-rw-r--r--	kernel/trace/ftrace.c	445
-rw-r--r--	kernel/trace/ring_buffer.c	26
-rw-r--r--	kernel/trace/trace.c	96
-rw-r--r--	kernel/trace/trace.h	2
-rw-r--r--	kernel/trace/trace_events.c	56
-rw-r--r--	kernel/trace/trace_functions_graph.c	43
-rw-r--r--	kernel/trace/trace_output.c	282
-rw-r--r--	kernel/trace/trace_output.h	4
-rw-r--r--	kernel/trace/trace_seq.c	428
11 files changed, 932 insertions, 456 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d4409356f40d..a5da09c899dd 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -29,11 +29,6 @@ config HAVE_FUNCTION_GRAPH_FP_TEST
 	help
 	  See Documentation/trace/ftrace-design.txt
 
-config HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	bool
-	help
-	  See Documentation/trace/ftrace-design.txt
-
 config HAVE_DYNAMIC_FTRACE
 	bool
 	help
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 2611613f14f1..67d6369ddf83 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_RING_BUFFER_BENCHMARK) += ring_buffer_benchmark.o
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_TRACING) += trace_output.o
+obj-$(CONFIG_TRACING) += trace_seq.o
 obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ac9d1dad630b..1654b12c891a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -80,9 +80,6 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* Quick disabling of function tracer. */
-int function_trace_stop __read_mostly;
-
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 /* What to set function_trace_op to */
@@ -1042,6 +1039,8 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+static struct ftrace_ops *removed_ops;
+
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
@@ -1304,25 +1303,15 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	struct ftrace_hash *new_hash;
 	int size = src->count;
 	int bits = 0;
-	int ret;
 	int i;
 
 	/*
-	 * Remove the current set, update the hash and add
-	 * them back.
-	 */
-	ftrace_hash_rec_disable(ops, enable);
-
-	/*
 	 * If the new source is empty, just free dst and assign it
 	 * the empty_hash.
 	 */
 	if (!src->count) {
-		free_ftrace_hash_rcu(*dst);
-		rcu_assign_pointer(*dst, EMPTY_HASH);
-		/* still need to update the function records */
-		ret = 0;
-		goto out;
+		new_hash = EMPTY_HASH;
+		goto update;
 	}
 
 	/*
@@ -1335,10 +1324,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	if (bits > FTRACE_HASH_MAX_BITS)
 		bits = FTRACE_HASH_MAX_BITS;
 
-	ret = -ENOMEM;
 	new_hash = alloc_ftrace_hash(bits);
 	if (!new_hash)
-		goto out;
+		return -ENOMEM;
 
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
@@ -1349,20 +1337,20 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 		}
 	}
 
+update:
+	/*
+	 * Remove the current set, update the hash and add
+	 * them back.
+	 */
+	ftrace_hash_rec_disable(ops, enable);
+
 	old_hash = *dst;
 	rcu_assign_pointer(*dst, new_hash);
 	free_ftrace_hash_rcu(old_hash);
 
-	ret = 0;
- out:
-	/*
-	 * Enable regardless of ret:
-	 * On success, we enable the new hash.
-	 * On failure, we re-enable the original hash.
-	 */
 	ftrace_hash_rec_enable(ops, enable);
 
-	return ret;
+	return 0;
 }
 
 /*
@@ -1492,6 +1480,53 @@ int ftrace_text_reserved(const void *start, const void *end)
 	return (int)!!ret;
 }
 
+/* Test if ops registered to this rec needs regs */
+static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *ops;
+	bool keep_regs = false;
+
+	for (ops = ftrace_ops_list;
+	     ops != &ftrace_list_end; ops = ops->next) {
+		/* pass rec in as regs to have non-NULL val */
+		if (ftrace_ops_test(ops, rec->ip, rec)) {
+			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+				keep_regs = true;
+				break;
+			}
+		}
+	}
+
+	return keep_regs;
+}
+
+static void ftrace_remove_tramp(struct ftrace_ops *ops,
+				struct dyn_ftrace *rec)
+{
+	struct ftrace_func_entry *entry;
+
+	entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
+	if (!entry)
+		return;
+
+	/*
+	 * The tramp_hash entry will be removed at time
+	 * of update.
+	 */
+	ops->nr_trampolines--;
+	rec->flags &= ~FTRACE_FL_TRAMP;
+}
+
+static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *op;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op->nr_trampolines)
+			ftrace_remove_tramp(op, rec);
+	} while_for_each_ftrace_op(op);
+}
+
 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 				     int filter_hash,
 				     bool inc)
@@ -1572,8 +1607,30 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 
 	if (inc) {
 		rec->flags++;
-		if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
+		if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
 			return;
+
+		/*
+		 * If there's only a single callback registered to a
+		 * function, and the ops has a trampoline registered
+		 * for it, then we can call it directly.
+		 */
+		if (ftrace_rec_count(rec) == 1 && ops->trampoline) {
+			rec->flags |= FTRACE_FL_TRAMP;
+			ops->nr_trampolines++;
+		} else {
+			/*
+			 * If we are adding another function callback
+			 * to this function, and the previous had a
+			 * trampoline used, then we need to go back to
+			 * the default trampoline.
+			 */
+			rec->flags &= ~FTRACE_FL_TRAMP;
+
+			/* remove trampolines from any ops for this rec */
+			ftrace_clear_tramps(rec);
+		}
+
 		/*
 		 * If any ops wants regs saved for this function
 		 * then all ops will get saved regs.
@@ -1581,9 +1638,30 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
 			rec->flags |= FTRACE_FL_REGS;
 	} else {
-		if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
+		if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
 			return;
 		rec->flags--;
+
+		if (ops->trampoline && !ftrace_rec_count(rec))
+			ftrace_remove_tramp(ops, rec);
+
+		/*
+		 * If the rec had REGS enabled and the ops that is
+		 * being removed had REGS set, then see if there is
+		 * still any ops for this record that wants regs.
+		 * If not, we can stop recording them.
+		 */
+		if (ftrace_rec_count(rec) > 0 &&
+		    rec->flags & FTRACE_FL_REGS &&
+		    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+			if (!test_rec_ops_needs_regs(rec))
+				rec->flags &= ~FTRACE_FL_REGS;
+		}
+
+		/*
+		 * flags will be cleared in ftrace_check_record()
+		 * if rec count is zero.
+		 */
 	}
 	count++;
 	/* Shortcut, if we handled all records, we are done. */
@@ -1668,17 +1746,23 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 	 * If we are disabling calls, then disable all records that
 	 * are enabled.
 	 */
-	if (enable && (rec->flags & ~FTRACE_FL_MASK))
+	if (enable && ftrace_rec_count(rec))
 		flag = FTRACE_FL_ENABLED;
 
 	/*
-	 * If enabling and the REGS flag does not match the REGS_EN, then
-	 * do not ignore this record. Set flags to fail the compare against
-	 * ENABLED.
+	 * If enabling and the REGS flag does not match the REGS_EN, or
+	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
+	 * this record. Set flags to fail the compare against ENABLED.
 	 */
-	if (flag &&
-	    (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
-		flag |= FTRACE_FL_REGS;
+	if (flag) {
+		if (!(rec->flags & FTRACE_FL_REGS) !=
+		    !(rec->flags & FTRACE_FL_REGS_EN))
+			flag |= FTRACE_FL_REGS;
+
+		if (!(rec->flags & FTRACE_FL_TRAMP) !=
+		    !(rec->flags & FTRACE_FL_TRAMP_EN))
+			flag |= FTRACE_FL_TRAMP;
+	}
 
 	/* If the state of this record hasn't changed, then do nothing */
 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
@@ -1696,6 +1780,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 			else
 				rec->flags &= ~FTRACE_FL_REGS_EN;
 		}
+		if (flag & FTRACE_FL_TRAMP) {
+			if (rec->flags & FTRACE_FL_TRAMP)
+				rec->flags |= FTRACE_FL_TRAMP_EN;
+			else
+				rec->flags &= ~FTRACE_FL_TRAMP_EN;
+		}
 	}
 
 	/*
@@ -1704,7 +1794,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 	 * Otherwise,
 	 *   return UPDATE_MODIFY_CALL to tell the caller to convert
 	 *   from the save regs, to a non-save regs function or
-	 *   vice versa.
+	 *   vice versa, or from a trampoline call.
 	 */
 	if (flag & FTRACE_FL_ENABLED)
 		return FTRACE_UPDATE_MAKE_CALL;
@@ -1714,7 +1804,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 
 	if (update) {
 		/* If there's no more users, clear all flags */
-		if (!(rec->flags & ~FTRACE_FL_MASK))
+		if (!ftrace_rec_count(rec))
 			rec->flags = 0;
 		else
 			/* Just disable the record (keep REGS state) */
@@ -1751,6 +1841,43 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
 	return ftrace_check_record(rec, enable, 0);
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *op;
+
+	/* Removed ops need to be tested first */
+	if (removed_ops && removed_ops->tramp_hash) {
+		if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip))
+			return removed_ops;
+	}
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (!op->tramp_hash)
+			continue;
+
+		if (ftrace_lookup_ip(op->tramp_hash, rec->ip))
+			return op;
+
+	} while_for_each_ftrace_op(op);
+
+	return NULL;
+}
+
+static struct ftrace_ops *
+ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *op;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		/* pass rec in as regs to have non-NULL val */
+		if (ftrace_ops_test(op, rec->ip, rec))
+			return op;
+	} while_for_each_ftrace_op(op);
+
+	return NULL;
+}
+
 /**
  * ftrace_get_addr_new - Get the call address to set to
  * @rec:  The ftrace record descriptor
@@ -1763,6 +1890,20 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
  */
 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
 {
+	struct ftrace_ops *ops;
+
+	/* Trampolines take precedence over regs */
+	if (rec->flags & FTRACE_FL_TRAMP) {
+		ops = ftrace_find_tramp_ops_new(rec);
+		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
+			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
+				    (void *)rec->ip, (void *)rec->ip);
+			/* Ftrace is shutting down, return anything */
+			return (unsigned long)FTRACE_ADDR;
+		}
+		return ops->trampoline;
+	}
+
 	if (rec->flags & FTRACE_FL_REGS)
 		return (unsigned long)FTRACE_REGS_ADDR;
 	else
@@ -1781,6 +1922,20 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
  */
 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
 {
+	struct ftrace_ops *ops;
+
+	/* Trampolines take precedence over regs */
+	if (rec->flags & FTRACE_FL_TRAMP_EN) {
+		ops = ftrace_find_tramp_ops_curr(rec);
+		if (FTRACE_WARN_ON(!ops)) {
+			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
+				    (void *)rec->ip, (void *)rec->ip);
+			/* Ftrace is shutting down, return anything */
+			return (unsigned long)FTRACE_ADDR;
+		}
+		return ops->trampoline;
+	}
+
 	if (rec->flags & FTRACE_FL_REGS_EN)
 		return (unsigned long)FTRACE_REGS_ADDR;
 	else
@@ -2023,6 +2178,89 @@ void __weak arch_ftrace_update_code(int command)
 		ftrace_run_stop_machine(command);
 }
 
+static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
+{
+	struct ftrace_page *pg;
+	struct dyn_ftrace *rec;
+	int size, bits;
+	int ret;
+
+	size = ops->nr_trampolines;
+	bits = 0;
+	/*
+	 * Make the hash size about 1/2 the # found
+	 */
+	for (size /= 2; size; size >>= 1)
+		bits++;
+
+	ops->tramp_hash = alloc_ftrace_hash(bits);
+	/*
+	 * TODO: a failed allocation is going to screw up
+	 * the accounting of what needs to be modified
+	 * and not. For now, we kill ftrace if we fail
+	 * to allocate here. But there are ways around this,
+	 * but that will take a little more work.
+	 */
+	if (!ops->tramp_hash)
+		return -ENOMEM;
+
+	do_for_each_ftrace_rec(pg, rec) {
+		if (ftrace_rec_count(rec) == 1 &&
+		    ftrace_ops_test(ops, rec->ip, rec)) {
+
+			/*
+			 * If another ops adds to a rec, the rec will
+			 * lose its trampoline and never get it back
+			 * until all ops are off of it.
+			 */
+			if (!(rec->flags & FTRACE_FL_TRAMP))
+				continue;
+
+			/* This record had better have a trampoline */
+			if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN)))
+				return -1;
+
+			ret = add_hash_entry(ops->tramp_hash, rec->ip);
+			if (ret < 0)
+				return ret;
+		}
+	} while_for_each_ftrace_rec();
+
+	/* The number of recs in the hash must match nr_trampolines */
+	FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+
+	return 0;
+}
+
+static int ftrace_save_tramp_hashes(void)
+{
+	struct ftrace_ops *op;
+	int ret;
+
+	/*
+	 * Now that any trampoline is being used, we need to save the
+	 * hashes for the ops that have them. This allows the mapping
+	 * back from the record to the ops that has the trampoline to
+	 * know what code is being replaced. Modifying code must always
+	 * verify what it is changing.
+	 */
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+		/* The tramp_hash is recreated each time. */
+		free_ftrace_hash(op->tramp_hash);
+		op->tramp_hash = NULL;
+
+		if (op->nr_trampolines) {
+			ret = ftrace_save_ops_tramp_hash(op);
+			if (ret)
+				return ret;
+		}
+
+	} while_for_each_ftrace_op(op);
+
+	return 0;
+}
+
 static void ftrace_run_update_code(int command)
 {
 	int ret;
@@ -2031,11 +2269,6 @@ static void ftrace_run_update_code(int command)
 	FTRACE_WARN_ON(ret);
 	if (ret)
 		return;
-	/*
-	 * Do not call function tracer while we update the code.
-	 * We are in stop machine.
-	 */
-	function_trace_stop++;
 
 	/*
 	 * By default we use stop_machine() to modify the code.
@@ -2045,15 +2278,15 @@ static void ftrace_run_update_code(int command)
 	 */
 	arch_ftrace_update_code(command);
 
-	function_trace_stop--;
-
 	ret = ftrace_arch_code_modify_post_process();
 	FTRACE_WARN_ON(ret);
+
+	ret = ftrace_save_tramp_hashes();
+	FTRACE_WARN_ON(ret);
 }
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static int global_start_up;
 
 static void control_ops_free(struct ftrace_ops *ops)
 {
@@ -2117,8 +2350,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 	ftrace_hash_rec_disable(ops, 1);
 
-	if (!global_start_up)
-		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
 	command |= FTRACE_UPDATE_CALLS;
 
@@ -2139,8 +2371,16 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		return 0;
 	}
 
+	/*
+	 * If the ops uses a trampoline, then it needs to be
+	 * tested first on update.
+	 */
+	removed_ops = ops;
+
 	ftrace_run_update_code(command);
 
+	removed_ops = NULL;
+
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
@@ -2398,7 +2638,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
 	return start_pg;
 
  free_pages:
-	while (start_pg) {
+	pg = start_pg;
+	while (pg) {
 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
 		free_pages((unsigned long)pg->records, order);
 		start_pg = pg->next;
@@ -2595,8 +2836,10 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	 * off, we can short cut and just print out that all
 	 * functions are enabled.
 	 */
-	if (iter->flags & FTRACE_ITER_FILTER &&
-	    ftrace_hash_empty(ops->filter_hash)) {
+	if ((iter->flags & FTRACE_ITER_FILTER &&
+	     ftrace_hash_empty(ops->filter_hash)) ||
+	    (iter->flags & FTRACE_ITER_NOTRACE &&
+	     ftrace_hash_empty(ops->notrace_hash))) {
 		if (*pos > 0)
 			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
@@ -2641,7 +2884,10 @@ static int t_show(struct seq_file *m, void *v)
 		return t_hash_show(m, iter);
 
 	if (iter->flags & FTRACE_ITER_PRINTALL) {
-		seq_printf(m, "#### all functions enabled ####\n");
+		if (iter->flags & FTRACE_ITER_NOTRACE)
+			seq_printf(m, "#### no functions disabled ####\n");
+		else
+			seq_printf(m, "#### all functions enabled ####\n");
 		return 0;
 	}
 
@@ -2651,10 +2897,22 @@ static int t_show(struct seq_file *m, void *v)
 		return 0;
 
 	seq_printf(m, "%ps", (void *)rec->ip);
-	if (iter->flags & FTRACE_ITER_ENABLED)
+	if (iter->flags & FTRACE_ITER_ENABLED) {
 		seq_printf(m, " (%ld)%s",
-			   rec->flags & ~FTRACE_FL_MASK,
-			   rec->flags & FTRACE_FL_REGS ? " R" : "");
+			   ftrace_rec_count(rec),
+			   rec->flags & FTRACE_FL_REGS ? " R" : " ");
+		if (rec->flags & FTRACE_FL_TRAMP_EN) {
+			struct ftrace_ops *ops;
+
+			ops = ftrace_find_tramp_ops_curr(rec);
+			if (ops && ops->trampoline)
+				seq_printf(m, "\ttramp: %pS",
+					   (void *)ops->trampoline);
+			else
+				seq_printf(m, "\ttramp: ERROR!");
+		}
+	}
+
 	seq_printf(m, "\n");
 
 	return 0;
@@ -2702,13 +2960,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
 	return iter ? 0 : -ENOMEM;
 }
 
-static void ftrace_filter_reset(struct ftrace_hash *hash)
-{
-	mutex_lock(&ftrace_lock);
-	ftrace_hash_clear(hash);
-	mutex_unlock(&ftrace_lock);
-}
-
 /**
  * ftrace_regex_open - initialize function tracer filter files
  * @ops: The ftrace_ops that hold the hash filters
@@ -2758,7 +3009,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 		hash = ops->filter_hash;
 
 	if (file->f_mode & FMODE_WRITE) {
-		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
+		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
+
+		if (file->f_flags & O_TRUNC)
+			iter->hash = alloc_ftrace_hash(size_bits);
+		else
+			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
+
 		if (!iter->hash) {
 			trace_parser_put(&iter->parser);
 			kfree(iter);
@@ -2767,10 +3024,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 		}
 	}
 
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC))
-		ftrace_filter_reset(iter->hash);
-
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
 
@@ -3471,14 +3724,16 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	else
 		orig_hash = &ops->notrace_hash;
 
-	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+	if (reset)
+		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+	else
+		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+
 	if (!hash) {
 		ret = -ENOMEM;
 		goto out_regex_unlock;
 	}
 
-	if (reset)
-		ftrace_filter_reset(hash);
 	if (buf && !ftrace_match_records(hash, buf, len)) {
 		ret = -EINVAL;
 		goto out_regex_unlock;
@@ -3630,6 +3885,7 @@ __setup("ftrace_filter=", set_ftrace_filter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
 
 static int __init set_graph_function(char *str)
@@ -3639,16 +3895,29 @@ static int __init set_graph_function(char *str)
 }
 __setup("ftrace_graph_filter=", set_graph_function);
 
-static void __init set_ftrace_early_graph(char *buf)
+static int __init set_graph_notrace_function(char *str)
+{
+	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
+	return 1;
+}
+__setup("ftrace_graph_notrace=", set_graph_notrace_function);
+
+static void __init set_ftrace_early_graph(char *buf, int enable)
 {
 	int ret;
 	char *func;
+	unsigned long *table = ftrace_graph_funcs;
+	int *count = &ftrace_graph_count;
+
+	if (!enable) {
+		table = ftrace_graph_notrace_funcs;
+		count = &ftrace_graph_notrace_count;
+	}
 
 	while (buf) {
 		func = strsep(&buf, ",");
 		/* we allow only one expression at a time */
-		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-				      FTRACE_GRAPH_MAX_FUNCS, func);
+		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
 		if (ret)
 			printk(KERN_DEBUG "ftrace: function %s not "
 					  "traceable\n", func);
@@ -3677,7 +3946,9 @@ static void __init set_ftrace_early_filters(void)
 	ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (ftrace_graph_buf[0])
-		set_ftrace_early_graph(ftrace_graph_buf);
+		set_ftrace_early_graph(ftrace_graph_buf, 1);
+	if (ftrace_graph_notrace_buf[0])
+		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 }
 
3683 3954
@@ -3819,7 +4090,12 @@ static int g_show(struct seq_file *m, void *v)
 		return 0;
 
 	if (ptr == (unsigned long *)1) {
-		seq_printf(m, "#### all functions enabled ####\n");
+		struct ftrace_graph_data *fgd = m->private;
+
+		if (fgd->table == ftrace_graph_funcs)
+			seq_printf(m, "#### all functions enabled ####\n");
+		else
+			seq_printf(m, "#### no functions disabled ####\n");
 		return 0;
 	}
 
@@ -4447,9 +4723,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	struct ftrace_ops *op;
 	int bit;
 
-	if (function_trace_stop)
-		return;
-
 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
 	if (bit < 0)
 		return;
@@ -4461,9 +4734,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	preempt_disable_notrace();
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
 		if (ftrace_ops_test(op, ip, regs)) {
-			if (WARN_ON(!op->func)) {
-				function_trace_stop = 1;
-				printk("op=%p %pS\n", op, op);
+			if (FTRACE_WARN_ON(!op->func)) {
+				pr_warn("op=%p %pS\n", op, op);
 				goto out;
 			}
 			op->func(ip, parent_ip, op, regs);
@@ -5084,6 +5356,12 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	/* Function graph doesn't use the .func field of global_ops */
 	global_ops.flags |= FTRACE_OPS_FL_STUB;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* Optimize function graph calling (if implemented by arch) */
+	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+		global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
+#endif
+
 	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
 
 out:
@@ -5104,6 +5382,10 @@ void unregister_ftrace_graph(void)
 	__ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
 	global_ops.flags &= ~FTRACE_OPS_FL_STUB;
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+		global_ops.trampoline = 0;
+#endif
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
@@ -5183,9 +5465,4 @@ void ftrace_graph_exit_task(struct task_struct *t)
 
 	kfree(ret_stack);
 }
-
-void ftrace_graph_stop(void)
-{
-	ftrace_stop();
-}
 #endif
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ff7027199a9a..925f629658d6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1689,22 +1689,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		if (!cpu_buffer->nr_pages_to_update)
 			continue;
 
-		/* The update must run on the CPU that is being updated. */
-		preempt_disable();
-		if (cpu == smp_processor_id() || !cpu_online(cpu)) {
+		/* Can't run something on an offline CPU. */
+		if (!cpu_online(cpu)) {
 			rb_update_pages(cpu_buffer);
 			cpu_buffer->nr_pages_to_update = 0;
 		} else {
-			/*
-			 * Can not disable preemption for schedule_work_on()
-			 * on PREEMPT_RT.
-			 */
-			preempt_enable();
 			schedule_work_on(cpu,
 					 &cpu_buffer->update_pages_work);
-			preempt_disable();
 		}
-		preempt_enable();
 	}
 
 	/* wait for all the updates to complete */
@@ -1742,22 +1734,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 
 	get_online_cpus();
 
-	preempt_disable();
-	/* The update must run on the CPU that is being updated. */
-	if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
+	/* Can't run something on an offline CPU. */
+	if (!cpu_online(cpu_id))
 		rb_update_pages(cpu_buffer);
 	else {
-		/*
-		 * Can not disable preemption for schedule_work_on()
-		 * on PREEMPT_RT.
-		 */
-		preempt_enable();
 		schedule_work_on(cpu_id,
 				 &cpu_buffer->update_pages_work);
 		wait_for_completion(&cpu_buffer->update_done);
-		preempt_disable();
 	}
-	preempt_enable();
 
 	cpu_buffer->nr_pages_to_update = 0;
 	put_online_cpus();
@@ -3775,7 +3759,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
-	if (iter->head >= local_read(&iter->head_page->page->commit)) {
+	if (iter->head >= rb_page_size(iter->head_page)) {
 		rb_inc_iter(iter);
 		goto again;
 	}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 291397e66669..8bb80fe08767 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -937,30 +937,6 @@ out:
 	return ret;
 }
 
-ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
-{
-	int len;
-	int ret;
-
-	if (!cnt)
-		return 0;
-
-	if (s->len <= s->readpos)
-		return -EBUSY;
-
-	len = s->len - s->readpos;
-	if (cnt > len)
-		cnt = len;
-	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-	if (ret == cnt)
-		return -EFAULT;
-
-	cnt -= ret;
-
-	s->readpos += cnt;
-	return cnt;
-}
-
 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
 	int len;
@@ -3699,6 +3675,7 @@ static const char readme_msg[] =
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
+	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
 #endif
 #ifdef CONFIG_TRACER_SNAPSHOT
@@ -4238,10 +4215,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 }
 
 static ssize_t
-tracing_max_lat_read(struct file *filp, char __user *ubuf,
+tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
-	unsigned long *ptr = filp->private_data;
 	char buf[64];
 	int r;
 
@@ -4253,10 +4229,9 @@ tracing_max_lat_read(struct file *filp, char __user *ubuf,
 }
 
 static ssize_t
-tracing_max_lat_write(struct file *filp, const char __user *ubuf,
+tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
-	unsigned long *ptr = filp->private_data;
 	unsigned long val;
 	int ret;
 
@@ -4269,6 +4244,52 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static ssize_t
+tracing_thresh_read(struct file *filp, char __user *ubuf,
+		    size_t cnt, loff_t *ppos)
+{
+	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
+}
+
+static ssize_t
+tracing_thresh_write(struct file *filp, const char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	struct trace_array *tr = filp->private_data;
+	int ret;
+
+	mutex_lock(&trace_types_lock);
+	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
+	if (ret < 0)
+		goto out;
+
+	if (tr->current_trace->update_thresh) {
+		ret = tr->current_trace->update_thresh(tr);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = cnt;
+out:
+	mutex_unlock(&trace_types_lock);
+
+	return ret;
+}
+
+static ssize_t
+tracing_max_lat_read(struct file *filp, char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
+}
+
+static ssize_t
+tracing_max_lat_write(struct file *filp, const char __user *ubuf,
+		      size_t cnt, loff_t *ppos)
+{
+	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
+}
+
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
 	struct trace_array *tr = inode->i_private;
@@ -5170,6 +5191,13 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 
+static const struct file_operations tracing_thresh_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_thresh_read,
+	.write		= tracing_thresh_write,
+	.llseek		= generic_file_llseek,
+};
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -6107,10 +6135,8 @@ destroy_trace_option_files(struct trace_option_dentry *topts)
 	if (!topts)
 		return;
 
-	for (cnt = 0; topts[cnt].opt; cnt++) {
-		if (topts[cnt].entry)
-			debugfs_remove(topts[cnt].entry);
-	}
+	for (cnt = 0; topts[cnt].opt; cnt++)
+		debugfs_remove(topts[cnt].entry);
 
 	kfree(topts);
 }
@@ -6533,7 +6559,7 @@ static __init int tracer_init_debugfs(void)
 	init_tracer_debugfs(&global_trace, d_tracer);
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
-			&tracing_thresh, &tracing_max_lat_fops);
+			&global_trace, &tracing_thresh_fops);
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9258f5a815db..385391fb1d3b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -339,6 +339,7 @@ struct tracer_flags {
  * @reset: called when one switches to another tracer
  * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
  * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @update_thresh: called when tracing_thresh is updated
  * @open: called when the trace file is opened
  * @pipe_open: called when the trace_pipe file is opened
  * @close: called when the trace file is released
@@ -357,6 +358,7 @@ struct tracer {
 	void			(*reset)(struct trace_array *tr);
 	void			(*start)(struct trace_array *tr);
 	void			(*stop)(struct trace_array *tr);
+	int			(*update_thresh)(struct trace_array *tr);
 	void			(*open)(struct trace_iterator *iter);
 	void			(*pipe_open)(struct trace_iterator *iter);
 	void			(*close)(struct trace_iterator *iter);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 2de53628689f..3154eb39241d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -8,6 +8,8 @@
  *
  */
 
+#define pr_fmt(fmt) fmt
+
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
@@ -1491,7 +1493,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
 
 	dir->entry = debugfs_create_dir(name, parent);
 	if (!dir->entry) {
-		pr_warning("Failed to create system directory %s\n", name);
+		pr_warn("Failed to create system directory %s\n", name);
 		__put_system(system);
 		goto out_free;
 	}
@@ -1507,7 +1509,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
 	if (!entry) {
 		kfree(system->filter);
 		system->filter = NULL;
-		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
+		pr_warn("Could not create debugfs '%s/filter' entry\n", name);
 	}
 
 	trace_create_file("enable", 0644, dir->entry, dir,
@@ -1522,8 +1524,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
  out_fail:
 	/* Only print this message if failed on memory allocation */
 	if (!dir || !system)
-		pr_warning("No memory to create event subsystem %s\n",
-			   name);
+		pr_warn("No memory to create event subsystem %s\n", name);
 	return NULL;
 }
 
@@ -1551,8 +1552,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 	name = ftrace_event_name(call);
 	file->dir = debugfs_create_dir(name, d_events);
 	if (!file->dir) {
-		pr_warning("Could not create debugfs '%s' directory\n",
-			   name);
+		pr_warn("Could not create debugfs '%s' directory\n", name);
 		return -1;
 	}
 
@@ -1575,8 +1575,8 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 	if (list_empty(head)) {
 		ret = call->class->define_fields(call);
 		if (ret < 0) {
-			pr_warning("Could not initialize trace point"
-				   " events/%s\n", name);
+			pr_warn("Could not initialize trace point events/%s\n",
+				name);
 			return -1;
 		}
 	}
@@ -1649,8 +1649,7 @@ static int event_init(struct ftrace_event_call *call)
 	if (call->class->raw_init) {
 		ret = call->class->raw_init(call);
 		if (ret < 0 && ret != -ENOSYS)
-			pr_warn("Could not initialize trace events/%s\n",
-				name);
+			pr_warn("Could not initialize trace events/%s\n", name);
 	}
 
 	return ret;
@@ -1895,8 +1894,8 @@ __trace_add_event_dirs(struct trace_array *tr)
 	list_for_each_entry(call, &ftrace_events, list) {
 		ret = __trace_add_new_event(call, tr);
 		if (ret < 0)
-			pr_warning("Could not create directory for event %s\n",
-				   ftrace_event_name(call));
+			pr_warn("Could not create directory for event %s\n",
+				ftrace_event_name(call));
 	}
 }
 
@@ -2208,8 +2207,8 @@ __trace_early_add_event_dirs(struct trace_array *tr)
 	list_for_each_entry(file, &tr->events, list) {
 		ret = event_create_dir(tr->event_dir, file);
 		if (ret < 0)
-			pr_warning("Could not create directory for event %s\n",
-				   ftrace_event_name(file->event_call));
+			pr_warn("Could not create directory for event %s\n",
+				ftrace_event_name(file->event_call));
 	}
 }
 
@@ -2232,8 +2231,8 @@ __trace_early_add_events(struct trace_array *tr)
 
 		ret = __trace_early_add_new_event(call, tr);
 		if (ret < 0)
-			pr_warning("Could not create early event %s\n",
-				   ftrace_event_name(call));
+			pr_warn("Could not create early event %s\n",
+				ftrace_event_name(call));
 	}
 }
 
@@ -2280,13 +2279,13 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
 	entry = debugfs_create_file("set_event", 0644, parent,
 				    tr, &ftrace_set_event_fops);
 	if (!entry) {
-		pr_warning("Could not create debugfs 'set_event' entry\n");
+		pr_warn("Could not create debugfs 'set_event' entry\n");
 		return -ENOMEM;
 	}
 
 	d_events = debugfs_create_dir("events", parent);
 	if (!d_events) {
-		pr_warning("Could not create debugfs 'events' directory\n");
+		pr_warn("Could not create debugfs 'events' directory\n");
 		return -ENOMEM;
 	}
 
@@ -2462,11 +2461,10 @@ static __init int event_trace_init(void)
 	entry = debugfs_create_file("available_events", 0444, d_tracer,
 				    tr, &ftrace_avail_fops);
 	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'available_events' entry\n");
+		pr_warn("Could not create debugfs 'available_events' entry\n");
 
 	if (trace_define_common_fields())
-		pr_warning("tracing: Failed to allocate common fields");
+		pr_warn("tracing: Failed to allocate common fields");
 
 	ret = early_event_add_tracer(d_tracer, tr);
 	if (ret)
@@ -2475,7 +2473,7 @@ static __init int event_trace_init(void)
 #ifdef CONFIG_MODULES
 	ret = register_module_notifier(&trace_module_nb);
 	if (ret)
-		pr_warning("Failed to register trace events module notifier\n");
+		pr_warn("Failed to register trace events module notifier\n");
 #endif
 	return 0;
 }
@@ -2579,7 +2577,7 @@ static __init void event_trace_self_tests(void)
 		 * it and the self test should not be on.
 		 */
 		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
-			pr_warning("Enabled event during self test!\n");
+			pr_warn("Enabled event during self test!\n");
 			WARN_ON_ONCE(1);
 			continue;
 		}
@@ -2607,8 +2605,8 @@ static __init void event_trace_self_tests(void)
 
 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
 		if (WARN_ON_ONCE(ret)) {
-			pr_warning("error enabling system %s\n",
-				   system->name);
+			pr_warn("error enabling system %s\n",
+				system->name);
 			continue;
 		}
 
@@ -2616,8 +2614,8 @@ static __init void event_trace_self_tests(void)
 
 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
 		if (WARN_ON_ONCE(ret)) {
-			pr_warning("error disabling system %s\n",
-				   system->name);
+			pr_warn("error disabling system %s\n",
+				system->name);
 			continue;
 		}
 
@@ -2631,7 +2629,7 @@ static __init void event_trace_self_tests(void)
 
 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error enabling all events\n");
+		pr_warn("error enabling all events\n");
 		return;
 	}
 
@@ -2640,7 +2638,7 @@ static __init void event_trace_self_tests(void)
 	/* reset sysname */
 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error disabling all events\n");
+		pr_warn("error disabling all events\n");
 		return;
 	}
 
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4de3e57f723c..f0a0c982cde3 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -15,6 +15,33 @@
15#include "trace.h" 15#include "trace.h"
16#include "trace_output.h" 16#include "trace_output.h"
17 17
18static bool kill_ftrace_graph;
19
20/**
21 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
22 *
23 * ftrace_graph_stop() is called when a severe error is detected in
24 * the function graph tracing. This function is called by the critical
25 * paths of function graph to keep those paths from doing any more harm.
26 */
27bool ftrace_graph_is_dead(void)
28{
29 return kill_ftrace_graph;
30}
31
32/**
33 * ftrace_graph_stop - set to permanently disable function graph tracincg
34 *
35 * In case of an error int function graph tracing, this is called
36 * to try to keep function graph tracing from causing any more harm.
37 * Usually this is pretty severe and this is called to try to at least
38 * get a warning out to the user.
39 */
40void ftrace_graph_stop(void)
41{
42 kill_ftrace_graph = true;
43}
44
18/* When set, irq functions will be ignored */ 45/* When set, irq functions will be ignored */
19static int ftrace_graph_skip_irqs; 46static int ftrace_graph_skip_irqs;
20 47
@@ -92,6 +119,9 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 	unsigned long long calltime;
 	int index;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return -EBUSY;
+
 	if (!current->ret_stack)
 		return -EBUSY;
 
@@ -323,7 +353,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	return ret;
 }
 
-int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
 {
 	if (tracing_thresh)
 		return 1;
@@ -412,7 +442,7 @@ void set_graph_array(struct trace_array *tr)
 	smp_mb();
 }
 
-void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 {
 	if (tracing_thresh &&
 	    (trace->rettime - trace->calltime < tracing_thresh))
@@ -445,6 +475,12 @@ static void graph_trace_reset(struct trace_array *tr)
445 unregister_ftrace_graph(); 475 unregister_ftrace_graph();
446} 476}
447 477
478static int graph_trace_update_thresh(struct trace_array *tr)
479{
480 graph_trace_reset(tr);
481 return graph_trace_init(tr);
482}
483
448static int max_bytes_for_cpu; 484static int max_bytes_for_cpu;
449 485
450static enum print_line_t 486static enum print_line_t
@@ -1399,7 +1435,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
1399 seq_printf(s, " | | | |\n"); 1435 seq_printf(s, " | | | |\n");
1400} 1436}
1401 1437
1402void print_graph_headers(struct seq_file *s) 1438static void print_graph_headers(struct seq_file *s)
1403{ 1439{
1404 print_graph_headers_flags(s, tracer_flags.val); 1440 print_graph_headers_flags(s, tracer_flags.val);
1405} 1441}
@@ -1495,6 +1531,7 @@ static struct trace_event graph_trace_ret_event = {
1495 1531
1496static struct tracer graph_trace __tracer_data = { 1532static struct tracer graph_trace __tracer_data = {
1497 .name = "function_graph", 1533 .name = "function_graph",
1534 .update_thresh = graph_trace_update_thresh,
1498 .open = graph_trace_open, 1535 .open = graph_trace_open,
1499 .pipe_open = graph_trace_open, 1536 .pipe_open = graph_trace_open,
1500 .close = graph_trace_close, 1537 .close = graph_trace_close,
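
The kill switch added above is worth a closer look: ftrace_graph_stop() flips a
write-once boolean, and the hot path checks ftrace_graph_is_dead() before it
touches the return stack, so a misbehaving tracer can be parked rather than
torn down. What follows is a minimal user-space sketch of that pattern for
illustration only; the names mirror, but are not, the kernel's symbols.

#include <stdbool.h>
#include <stdio.h>

/* Write-once kill switch: set on a fatal error, never cleared. */
static bool kill_tracer;

static bool tracer_is_dead(void) { return kill_tracer; }
static void tracer_stop(void)    { kill_tracer = true; }

/* Hot path: refuse to do any more work once the switch is thrown. */
static int push_trace_entry(const char *func)
{
	if (tracer_is_dead())
		return -1;	/* the kernel returns -EBUSY here */
	printf("enter %s\n", func);
	return 0;
}

int main(void)
{
	push_trace_entry("first");	/* traced normally */
	tracer_stop();			/* severe error detected */
	push_trace_entry("second");	/* silently refused */
	return 0;
}

Because the flag is only ever set, never cleared, the hot path needs no
locking; a reader that sees the flag slightly late does no harm, since the
only goal is to stop doing further damage.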
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f3dad80c20b2..c6977d5a9b12 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -20,23 +20,6 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
 
-int trace_print_seq(struct seq_file *m, struct trace_seq *s)
-{
-	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
-	int ret;
-
-	ret = seq_write(m, s->buffer, len);
-
-	/*
-	 * Only reset this buffer if we successfully wrote to the
-	 * seq_file buffer.
-	 */
-	if (!ret)
-		trace_seq_init(s);
-
-	return ret;
-}
-
 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
@@ -85,257 +68,6 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
 	return TRACE_TYPE_HANDLED;
 }
 
-/**
- * trace_seq_printf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * It returns 0 if the trace oversizes the buffer's free
- * space, 1 otherwise.
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
-{
-	int len = (PAGE_SIZE - 1) - s->len;
-	va_list ap;
-	int ret;
-
-	if (s->full || !len)
-		return 0;
-
-	va_start(ap, fmt);
-	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
-	va_end(ap);
-
-	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len) {
-		s->full = 1;
-		return 0;
-	}
-
-	s->len += ret;
-
-	return 1;
-}
-EXPORT_SYMBOL_GPL(trace_seq_printf);
-
-/**
- * trace_seq_bitmask - put a list of longs as a bitmask print output
- * @s: trace sequence descriptor
- * @maskp: points to an array of unsigned longs that represent a bitmask
- * @nmaskbits: The number of bits that are valid in @maskp
- *
- * It returns 0 if the trace oversizes the buffer's free
- * space, 1 otherwise.
- *
- * Writes a ASCII representation of a bitmask string into @s.
- */
-int
-trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
-		  int nmaskbits)
-{
-	int len = (PAGE_SIZE - 1) - s->len;
-	int ret;
-
-	if (s->full || !len)
-		return 0;
-
-	ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits);
-	s->len += ret;
-
-	return 1;
-}
-EXPORT_SYMBOL_GPL(trace_seq_bitmask);
-
-/**
- * trace_seq_vprintf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
-{
-	int len = (PAGE_SIZE - 1) - s->len;
-	int ret;
-
-	if (s->full || !len)
-		return 0;
-
-	ret = vsnprintf(s->buffer + s->len, len, fmt, args);
-
-	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len) {
-		s->full = 1;
-		return 0;
-	}
-
-	s->len += ret;
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(trace_seq_vprintf);
-
-int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
-{
-	int len = (PAGE_SIZE - 1) - s->len;
-	int ret;
-
-	if (s->full || !len)
-		return 0;
-
-	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
-
-	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len) {
-		s->full = 1;
-		return 0;
-	}
-
-	s->len += ret;
-
-	return len;
-}
-
-/**
- * trace_seq_puts - trace sequence printing of simple string
- * @s: trace sequence descriptor
- * @str: simple string to record
- *
- * The tracer may use either the sequence operations or its own
- * copy to user routines. This function records a simple string
- * into a special buffer (@s) for later retrieval by a sequencer
- * or other mechanism.
- */
-int trace_seq_puts(struct trace_seq *s, const char *str)
-{
-	int len = strlen(str);
-
-	if (s->full)
-		return 0;
-
-	if (len > ((PAGE_SIZE - 1) - s->len)) {
-		s->full = 1;
-		return 0;
-	}
-
-	memcpy(s->buffer + s->len, str, len);
-	s->len += len;
-
-	return len;
-}
-
-int trace_seq_putc(struct trace_seq *s, unsigned char c)
-{
-	if (s->full)
-		return 0;
-
-	if (s->len >= (PAGE_SIZE - 1)) {
-		s->full = 1;
-		return 0;
-	}
-
-	s->buffer[s->len++] = c;
-
-	return 1;
-}
-EXPORT_SYMBOL(trace_seq_putc);
-
-int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
-{
-	if (s->full)
-		return 0;
-
-	if (len > ((PAGE_SIZE - 1) - s->len)) {
-		s->full = 1;
-		return 0;
-	}
-
-	memcpy(s->buffer + s->len, mem, len);
-	s->len += len;
-
-	return len;
-}
-
-int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
-{
-	unsigned char hex[HEX_CHARS];
-	const unsigned char *data = mem;
-	int i, j;
-
-	if (s->full)
-		return 0;
-
-#ifdef __BIG_ENDIAN
-	for (i = 0, j = 0; i < len; i++) {
-#else
-	for (i = len-1, j = 0; i >= 0; i--) {
-#endif
-		hex[j++] = hex_asc_hi(data[i]);
-		hex[j++] = hex_asc_lo(data[i]);
-	}
-	hex[j++] = ' ';
-
-	return trace_seq_putmem(s, hex, j);
-}
-
-void *trace_seq_reserve(struct trace_seq *s, size_t len)
-{
-	void *ret;
-
-	if (s->full)
-		return NULL;
-
-	if (len > ((PAGE_SIZE - 1) - s->len)) {
-		s->full = 1;
-		return NULL;
-	}
-
-	ret = s->buffer + s->len;
-	s->len += len;
-
-	return ret;
-}
-
-int trace_seq_path(struct trace_seq *s, const struct path *path)
-{
-	unsigned char *p;
-
-	if (s->full)
-		return 0;
-
-	if (s->len >= (PAGE_SIZE - 1)) {
-		s->full = 1;
-		return 0;
-	}
-
-	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
-	if (!IS_ERR(p)) {
-		p = mangle_path(s->buffer + s->len, p, "\n");
-		if (p) {
-			s->len = p - s->buffer;
-			return 1;
-		}
-	} else {
-		s->buffer[s->len++] = '?';
-		return 1;
-	}
-
-	s->full = 1;
-	return 0;
-}
-
 const char *
 ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 		       unsigned long flags,
@@ -343,7 +75,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 {
 	unsigned long mask;
 	const char *str;
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 	int i, first = 1;
 
 	for (i = 0; flag_array[i].name && flags; i++) {
@@ -379,7 +111,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 			 const struct trace_print_flags *symbol_array)
 {
 	int i;
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 
 	for (i = 0; symbol_array[i].name; i++) {
 
@@ -390,7 +122,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 			break;
 	}
 
-	if (ret == (const char *)(p->buffer + p->len))
+	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
 		trace_seq_printf(p, "0x%lx", val);
 
 	trace_seq_putc(p, 0);
@@ -405,7 +137,7 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
 			 const struct trace_print_flags_u64 *symbol_array)
 {
 	int i;
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 
 	for (i = 0; symbol_array[i].name; i++) {
 
@@ -416,7 +148,7 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
 			break;
 	}
 
-	if (ret == (const char *)(p->buffer + p->len))
+	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
 		trace_seq_printf(p, "0x%llx", val);
 
 	trace_seq_putc(p, 0);
@@ -430,7 +162,7 @@ const char *
 ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
 			 unsigned int bitmask_size)
 {
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 
 	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
 	trace_seq_putc(p, 0);
@@ -443,7 +175,7 @@ const char *
 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
 	int i;
-	const char *ret = p->buffer + p->len;
+	const char *ret = trace_seq_buffer_ptr(p);
 
 	for (i = 0; i < buf_len; i++)
 		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 127a9d8c8357..80b25b585a70 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -35,9 +35,6 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
 extern int __unregister_ftrace_event(struct trace_event *event);
 extern struct rw_semaphore trace_event_sem;
 
-#define MAX_MEMHEX_BYTES	8
-#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
-
 #define SEQ_PUT_FIELD_RET(s, x)				\
 do {							\
 	if (!trace_seq_putmem(s, &(x), sizeof(x)))	\
@@ -46,7 +43,6 @@ do {							\
 
 #define SEQ_PUT_HEX_FIELD_RET(s, x)			\
 do {							\
-	BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);	\
 	if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))	\
 		return TRACE_TYPE_PARTIAL_LINE;		\
 } while (0)
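
MAX_MEMHEX_BYTES and HEX_CHARS move into trace_seq.c below, and the
BUILD_BUG_ON() guard can go away because the new trace_seq_putmem_hex()
converts arbitrary lengths in fixed-size chunks instead of requiring the whole
input to fit one hex[] scratch buffer at compile time. A rough stand-alone
sketch of that chunking idea only (simplified; the kernel's byte-order
handling and trace_seq plumbing are omitted):

#include <stdio.h>
#include <string.h>

#define CHUNK 8			/* mirrors MAX_MEMHEX_BYTES */

static void put_hex(const unsigned char *data, size_t len)
{
	char hex[CHUNK * 2 + 1];

	/* Convert CHUNK bytes at a time, so any length is accepted. */
	while (len) {
		size_t n = len < CHUNK ? len : CHUNK;
		size_t j = 0;

		for (size_t i = 0; i < n; i++) {
			hex[j++] = "0123456789abcdef"[data[i] >> 4];
			hex[j++] = "0123456789abcdef"[data[i] & 0xf];
		}
		hex[j] = '\0';
		printf("%s ", hex);
		data += n;
		len -= n;
	}
	putchar('\n');
}

int main(void)
{
	unsigned char buf[20];

	memset(buf, 0xab, sizeof(buf));
	put_hex(buf, sizeof(buf));	/* 20 bytes: no compile-time cap */
	return 0;
}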
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
new file mode 100644
index 000000000000..1f24ed99dca2
--- /dev/null
+++ b/kernel/trace/trace_seq.c
@@ -0,0 +1,428 @@
+/*
+ * trace_seq.c
+ *
+ * Copyright (C) 2008-2014 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * The trace_seq is a handy tool that allows you to pass a descriptor around
+ * to a buffer that other functions can write to. It is similar to the
+ * seq_file functionality but has some differences.
+ *
+ * To use it, the trace_seq must be initialized with trace_seq_init().
+ * This will set up the counters within the descriptor. You can call
+ * trace_seq_init() more than once to reset the trace_seq to start
+ * from scratch.
+ *
+ * The buffer size is currently PAGE_SIZE, although it may become dynamic
+ * in the future.
+ *
+ * A write to the buffer will either succeed or fail. That is, unlike
+ * sprintf() there will not be a partial write (well, it may write into
+ * the buffer but it won't update the pointers). This allows users to
+ * try to write something into the trace_seq buffer and if it fails
+ * they can flush it and try again.
+ *
+ */
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/trace_seq.h>
+
+/* How much buffer is left on the trace_seq? */
+#define TRACE_SEQ_BUF_LEFT(s) ((PAGE_SIZE - 1) - (s)->len)
+
+/* How much buffer is written? */
+#define TRACE_SEQ_BUF_USED(s) min((s)->len, (unsigned int)(PAGE_SIZE - 1))
+
+/**
+ * trace_print_seq - move the contents of trace_seq into a seq_file
+ * @m: the seq_file descriptor that is the destination
+ * @s: the trace_seq descriptor that is the source.
+ *
+ * Returns 0 on success and non zero on error. If it succeeds to
+ * write to the seq_file it will reset the trace_seq, otherwise
+ * it does not modify the trace_seq to let the caller try again.
+ */
+int trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+	unsigned int len = TRACE_SEQ_BUF_USED(s);
+	int ret;
+
+	ret = seq_write(m, s->buffer, len);
+
+	/*
+	 * Only reset this buffer if we successfully wrote to the
+	 * seq_file buffer. This lets the caller try again or
+	 * do something else with the contents.
+	 */
+	if (!ret)
+		trace_seq_init(s);
+
+	return ret;
+}
+
+/**
+ * trace_seq_printf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace
+ * trace_seq_printf() is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ *
+ * Returns 1 if we successfully wrote all the contents to
+ * the buffer.
+ * Returns 0 if the length to write is bigger than the
+ * reserved buffer space. In this case, nothing gets written.
+ */
+int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	va_list ap;
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	va_start(ap, fmt);
+	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
+	va_end(ap);
+
+	/* If we can't write it all, don't bother writing anything */
+	if (ret >= len) {
+		s->full = 1;
+		return 0;
+	}
+
+	s->len += ret;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_printf);
+
+/**
+ * trace_seq_bitmask - write a bitmask array in its ASCII representation
+ * @s: trace sequence descriptor
+ * @maskp: points to an array of unsigned longs that represent a bitmask
+ * @nmaskbits: The number of bits that are valid in @maskp
+ *
+ * Writes an ASCII representation of a bitmask string into @s.
+ *
+ * Returns 1 if we successfully wrote all the contents to
+ * the buffer.
+ * Returns 0 if the length to write is bigger than the
+ * reserved buffer space. In this case, nothing gets written.
+ */
+int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+		      int nmaskbits)
+{
+	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits);
+	s->len += ret;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_bitmask);
+
+/**
+ * trace_seq_vprintf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace
+ * trace_seq_printf is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
+{
+	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	ret = vsnprintf(s->buffer + s->len, len, fmt, args);
+
+	/* If we can't write it all, don't bother writing anything */
+	if (ret >= len) {
+		s->full = 1;
+		return 0;
+	}
+
+	s->len += ret;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_vprintf);
+
+/**
+ * trace_seq_bprintf - Write the printf string from binary arguments
+ * @s: trace sequence descriptor
+ * @fmt: The format string for the @binary arguments
+ * @binary: The binary arguments for @fmt.
+ *
+ * When recording in a fast path, a printf may be recorded with just
+ * saving the format and the arguments as they were passed to the
+ * function, instead of wasting cycles converting the arguments into
+ * ASCII characters. Instead, the arguments are saved in a 32 bit
+ * word array that is defined by the format string constraints.
+ *
+ * This function will take the format and the binary array and finish
+ * the conversion into the ASCII string within the buffer.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+
+	/* If we can't write it all, don't bother writing anything */
+	if (ret >= len) {
+		s->full = 1;
+		return 0;
+	}
+
+	s->len += ret;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_bprintf);
+
+/**
+ * trace_seq_puts - trace sequence printing of simple string
+ * @s: trace sequence descriptor
+ * @str: simple string to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple string
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_puts(struct trace_seq *s, const char *str)
+{
+	unsigned int len = strlen(str);
+
+	if (s->full)
+		return 0;
+
+	if (len > TRACE_SEQ_BUF_LEFT(s)) {
+		s->full = 1;
+		return 0;
+	}
+
+	memcpy(s->buffer + s->len, str, len);
+	s->len += len;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_puts);
+
+/**
+ * trace_seq_putc - trace sequence printing of simple character
+ * @s: trace sequence descriptor
+ * @c: simple character to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple character
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+	if (s->full)
+		return 0;
+
+	if (TRACE_SEQ_BUF_LEFT(s) < 1) {
+		s->full = 1;
+		return 0;
+	}
+
+	s->buffer[s->len++] = c;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_putc);
+
+/**
+ * trace_seq_putmem - write raw data into the trace_seq buffer
+ * @s: trace sequence descriptor
+ * @mem: The raw memory to copy into the buffer
+ * @len: The length of the raw memory to copy (in bytes)
+ *
+ * There may be cases where raw memory needs to be written into the
+ * buffer and a strcpy() would not work. Using this function allows
+ * for such cases.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
+{
+	if (s->full)
+		return 0;
+
+	if (len > TRACE_SEQ_BUF_LEFT(s)) {
+		s->full = 1;
+		return 0;
+	}
+
+	memcpy(s->buffer + s->len, mem, len);
+	s->len += len;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_putmem);
+
+#define MAX_MEMHEX_BYTES	8U
+#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
+
+/**
+ * trace_seq_putmem_hex - write raw memory into the buffer in ASCII hex
+ * @s: trace sequence descriptor
+ * @mem: The raw memory to write its hex ASCII representation of
+ * @len: The length of the raw memory to copy (in bytes)
+ *
+ * This is similar to trace_seq_putmem() except instead of just copying the
+ * raw memory into the buffer it writes its ASCII representation of it
+ * in hex characters.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+			 unsigned int len)
+{
+	unsigned char hex[HEX_CHARS];
+	const unsigned char *data = mem;
+	unsigned int start_len;
+	int i, j;
+	int cnt = 0;
+
+	if (s->full)
+		return 0;
+
+	while (len) {
+		start_len = min(len, HEX_CHARS - 1);
+#ifdef __BIG_ENDIAN
+		for (i = 0, j = 0; i < start_len; i++) {
+#else
+		for (i = start_len-1, j = 0; i >= 0; i--) {
+#endif
+			hex[j++] = hex_asc_hi(data[i]);
+			hex[j++] = hex_asc_lo(data[i]);
+		}
+		if (WARN_ON_ONCE(j == 0 || j/2 > len))
+			break;
+
+		/* j increments twice per loop */
+		len -= j / 2;
+		hex[j++] = ' ';
+
+		cnt += trace_seq_putmem(s, hex, j);
+	}
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(trace_seq_putmem_hex);
+
+/**
+ * trace_seq_path - copy a path into the sequence buffer
+ * @s: trace sequence descriptor
+ * @path: path to write into the sequence buffer.
+ *
+ * Write a path name into the sequence buffer.
+ *
+ * Returns 1 if we successfully wrote all the contents to
+ * the buffer.
+ * Returns 0 if the length to write is bigger than the
+ * reserved buffer space. In this case, nothing gets written.
+ */
+int trace_seq_path(struct trace_seq *s, const struct path *path)
+{
+	unsigned char *p;
+
+	if (s->full)
+		return 0;
+
+	if (TRACE_SEQ_BUF_LEFT(s) < 1) {
+		s->full = 1;
+		return 0;
+	}
+
+	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+	if (!IS_ERR(p)) {
+		p = mangle_path(s->buffer + s->len, p, "\n");
+		if (p) {
+			s->len = p - s->buffer;
+			return 1;
+		}
+	} else {
+		s->buffer[s->len++] = '?';
+		return 1;
+	}
+
+	s->full = 1;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(trace_seq_path);
+
+/**
+ * trace_seq_to_user - copy the sequence buffer to user space
+ * @s: trace sequence descriptor
+ * @ubuf: The userspace memory location to copy to
+ * @cnt: The amount to copy
+ *
+ * Copies the sequence buffer into the userspace memory pointed to
+ * by @ubuf. It starts from the last read position (@s->readpos)
+ * and writes up to @cnt characters or until it reaches the end of
+ * the content in the buffer (@s->len), whichever comes first.
+ *
+ * On success, it returns the number of bytes it copied (a positive
+ * number).
+ *
+ * On failure it returns -EBUSY if all of the content in the
+ * sequence has already been read, which includes the case where
+ * there is nothing in the sequence (@s->len == @s->readpos).
+ *
+ * Returns -EFAULT if the copy to userspace fails.
+ */
+int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)
+{
+	int len;
+	int ret;
+
+	if (!cnt)
+		return 0;
+
+	if (s->len <= s->readpos)
+		return -EBUSY;
+
+	len = s->len - s->readpos;
+	if (cnt > len)
+		cnt = len;
+	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+	if (ret == cnt)
+		return -EFAULT;
+
+	cnt -= ret;
+
+	s->readpos += cnt;
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(trace_seq_to_user);
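
Taken together, the new file gives trace_seq its all-or-nothing write
semantics: a write that does not fit sets s->full and commits nothing, so a
caller can flush the buffer and retry, exactly as the header comment
describes. Below is a compressed user-space sketch of those semantics; the
buffer size and the names are simplified inventions, not the kernel API.

#include <stdarg.h>
#include <stdio.h>

#define SEQ_BUF_SIZE 64			/* the kernel uses PAGE_SIZE */

struct tseq {
	unsigned int len;
	int full;
	char buffer[SEQ_BUF_SIZE];
};

/*
 * Returns 1 if the whole string fit, 0 otherwise. A failed write leaves
 * len untouched, so the caller can flush and try again.
 */
static int tseq_printf(struct tseq *s, const char *fmt, ...)
{
	unsigned int left = (SEQ_BUF_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (s->full || !left)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, left, fmt, ap);
	va_end(ap);

	/* Too big: mark full and commit nothing. */
	if (ret < 0 || (unsigned int)ret >= left) {
		s->full = 1;
		return 0;
	}
	s->len += ret;
	return 1;
}

int main(void)
{
	struct tseq s = { 0 };

	tseq_printf(&s, "pid=%d comm=%s", 1234, "bash");
	if (!tseq_printf(&s, "%*s", 128, ""))	/* oversized: rejected whole */
		printf("overflow rejected, len still %u\n", s.len);
	printf("%s\n", s.buffer);
	return 0;
}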