Diffstat (limited to 'kernel')

 kernel/exit.c                  |   1
 kernel/fork.c                  |   3
 kernel/power/disk.c            |  13
 kernel/power/main.c            |   5
 kernel/sched.c                 |   1
 kernel/trace/Kconfig           |  19
 kernel/trace/ftrace.c          | 101
 kernel/trace/ring_buffer.c     |  79
 kernel/trace/trace.c           |  15
 kernel/trace/trace_branch.c    |  74
 kernel/trace/trace_mmiotrace.c |  16

 11 files changed, 260 insertions(+), 67 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 35c8ec2ba03a..e5ae36ebe8af 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1127,7 +1127,6 @@ NORET_TYPE void do_exit(long code)
 	preempt_disable();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
-
 	schedule();
 	BUG();
 	/* Avoid "noreturn function does return". */
diff --git a/kernel/fork.c b/kernel/fork.c
index ac62f43ee430..d6e1a3205f62 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -47,6 +47,7 @@
 #include <linux/mount.h>
 #include <linux/audit.h>
 #include <linux/memcontrol.h>
+#include <linux/ftrace.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/acct.h>
@@ -139,6 +140,7 @@ void free_task(struct task_struct *tsk)
 	prop_local_destroy_single(&tsk->dirties);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
+	ftrace_retfunc_exit_task(tsk);
 	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -1269,6 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
+	ftrace_retfunc_init_task(p);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
 	return p;
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index c9d74083746f..f77d3819ef57 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,7 +22,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
-#include <linux/ftrace.h>
 
 #include "power.h"
 
@@ -257,7 +256,7 @@ static int create_image(int platform_mode)
 
 int hibernation_snapshot(int platform_mode)
 {
-	int error, ftrace_save;
+	int error;
 
 	/* Free memory before shutting down devices. */
 	error = swsusp_shrink_memory();
@@ -269,7 +268,6 @@ int hibernation_snapshot(int platform_mode)
 		goto Close;
 
 	suspend_console();
-	ftrace_save = __ftrace_enabled_save();
 	error = device_suspend(PMSG_FREEZE);
 	if (error)
 		goto Recover_platform;
@@ -299,7 +297,6 @@ int hibernation_snapshot(int platform_mode)
  Resume_devices:
 	device_resume(in_suspend ?
 		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
-	__ftrace_enabled_restore(ftrace_save);
 	resume_console();
  Close:
 	platform_end(platform_mode);
@@ -370,11 +367,10 @@ static int resume_target_kernel(void)
 
 int hibernation_restore(int platform_mode)
 {
-	int error, ftrace_save;
+	int error;
 
 	pm_prepare_console();
 	suspend_console();
-	ftrace_save = __ftrace_enabled_save();
 	error = device_suspend(PMSG_QUIESCE);
 	if (error)
 		goto Finish;
@@ -389,7 +385,6 @@ int hibernation_restore(int platform_mode)
 	platform_restore_cleanup(platform_mode);
 	device_resume(PMSG_RECOVER);
  Finish:
-	__ftrace_enabled_restore(ftrace_save);
 	resume_console();
 	pm_restore_console();
 	return error;
@@ -402,7 +397,7 @@ int hibernation_restore(int platform_mode)
 
 int hibernation_platform_enter(void)
 {
-	int error, ftrace_save;
+	int error;
 
 	if (!hibernation_ops)
 		return -ENOSYS;
@@ -417,7 +412,6 @@ int hibernation_platform_enter(void)
 		goto Close;
 
 	suspend_console();
-	ftrace_save = __ftrace_enabled_save();
 	error = device_suspend(PMSG_HIBERNATE);
 	if (error) {
 		if (hibernation_ops->recover)
@@ -452,7 +446,6 @@ int hibernation_platform_enter(void)
 		hibernation_ops->finish();
  Resume_devices:
 	device_resume(PMSG_RESTORE);
-	__ftrace_enabled_restore(ftrace_save);
 	resume_console();
  Close:
 	hibernation_ops->end();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index b8f7ce9473e8..613f16941b85 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -22,7 +22,6 @@
 #include <linux/freezer.h>
 #include <linux/vmstat.h>
 #include <linux/syscalls.h>
-#include <linux/ftrace.h>
 
 #include "power.h"
 
@@ -317,7 +316,7 @@ static int suspend_enter(suspend_state_t state)
  */
 int suspend_devices_and_enter(suspend_state_t state)
 {
-	int error, ftrace_save;
+	int error;
 
 	if (!suspend_ops)
 		return -ENOSYS;
@@ -328,7 +327,6 @@ int suspend_devices_and_enter(suspend_state_t state)
 		goto Close;
 	}
 	suspend_console();
-	ftrace_save = __ftrace_enabled_save();
 	suspend_test_start();
 	error = device_suspend(PMSG_SUSPEND);
 	if (error) {
@@ -360,7 +358,6 @@ int suspend_devices_and_enter(suspend_state_t state)
 	suspend_test_start();
 	device_resume(PMSG_RESUME);
 	suspend_test_finish("resume devices");
-	__ftrace_enabled_restore(ftrace_save);
 	resume_console();
  Close:
 	if (suspend_ops->end)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4de56108c86f..388d9db044ab 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5901,6 +5901,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 * The idle tasks have their own, simple scheduling class:
 	 */
 	idle->sched_class = &idle_sched_class;
+	ftrace_retfunc_init_task(idle);
 }
 
 /*
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 87fc34a1bb91..9cbf7761f498 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -169,14 +169,29 @@ config TRACE_BRANCH_PROFILING
 	  This tracer profiles all the the likely and unlikely macros
 	  in the kernel. It will display the results in:
 
-	  /debugfs/tracing/profile_likely
-	  /debugfs/tracing/profile_unlikely
+	  /debugfs/tracing/profile_annotated_branch
 
 	  Note: this will add a significant overhead, only turn this
 	  on if you need to profile the system's use of these macros.
 
 	  Say N if unsure.
 
+config PROFILE_ALL_BRANCHES
+	bool "Profile all if conditionals"
+	depends on TRACE_BRANCH_PROFILING
+	help
+	  This tracer profiles all branch conditions. Every if ()
+	  taken in the kernel is recorded whether it hit or miss.
+	  The results will be displayed in:
+
+	  /debugfs/tracing/profile_branch
+
+	  This configuration, when enabled, will impose a great overhead
+	  on the system. This should only be enabled when the system
+	  is to be analyzed
+
+	  Say N if unsure.
+
 config TRACING_BRANCHES
 	bool
 	help
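For orientation, the annotated-branch profiler measures how often the prediction baked into a likely()/unlikely() hint matches run-time behavior. Below is a minimal sketch of the kind of annotation being profiled; it is illustrative only, with invented flag names, and builds in user space with GCC or Clang:

	#include <errno.h>

	/* Kernel-style hints; in the kernel these come from <linux/compiler.h>. */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* Hypothetical flag bits, purely for illustration. */
	#define VALID_MASK	0x3UL
	#define FAST_PATH	0x1UL

	static int parse_flags(unsigned long flags)
	{
		/* unlikely(): we claim invalid flags are rare; the profiler
		 * counts how often that claim turns out to be wrong. */
		if (unlikely(flags & ~VALID_MASK))
			return -EINVAL;

		/* likely(): we claim the fast path is the common case. */
		if (likely(flags & FAST_PATH))
			return 0;

		return 1;
	}

	int main(void)
	{
		return parse_flags(FAST_PATH);	/* exercises the likely path */
	}

With TRACE_BRANCH_PROFILING, each annotated hint gets a correct/incorrect counter pair; PROFILE_ALL_BRANCHES extends the recording to every if (), annotated or not.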
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f212da486689..53042f118f23 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1498,10 +1498,77 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
 
+static atomic_t ftrace_retfunc_active;
+
 /* The callback that hooks the return of a function */
 trace_function_return_t ftrace_function_return =
 			(trace_function_return_t)ftrace_stub;
 
+
+/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+{
+	int i;
+	int ret = 0;
+	unsigned long flags;
+	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
+	struct task_struct *g, *t;
+
+	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
+		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
+					    * sizeof(struct ftrace_ret_stack),
+					    GFP_KERNEL);
+		if (!ret_stack_list[i]) {
+			start = 0;
+			end = i;
+			ret = -ENOMEM;
+			goto free;
+		}
+	}
+
+	read_lock_irqsave(&tasklist_lock, flags);
+	do_each_thread(g, t) {
+		if (start == end) {
+			ret = -EAGAIN;
+			goto unlock;
+		}
+
+		if (t->ret_stack == NULL) {
+			t->ret_stack = ret_stack_list[start++];
+			t->curr_ret_stack = -1;
+			atomic_set(&t->trace_overrun, 0);
+		}
+	} while_each_thread(g, t);
+
+unlock:
+	read_unlock_irqrestore(&tasklist_lock, flags);
+free:
+	for (i = start; i < end; i++)
+		kfree(ret_stack_list[i]);
+	return ret;
+}
+
+/* Allocate a return stack for each task */
+static int start_return_tracing(void)
+{
+	struct ftrace_ret_stack **ret_stack_list;
+	int ret;
+
+	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
+				sizeof(struct ftrace_ret_stack *),
+				GFP_KERNEL);
+
+	if (!ret_stack_list)
+		return -ENOMEM;
+
+	do {
+		ret = alloc_retstack_tasklist(ret_stack_list);
+	} while (ret == -EAGAIN);
+
+	kfree(ret_stack_list);
+	return ret;
+}
+
 int register_ftrace_return(trace_function_return_t func)
 {
 	int ret = 0;
@@ -1516,7 +1583,12 @@ int register_ftrace_return(trace_function_return_t func)
 		ret = -EBUSY;
 		goto out;
 	}
-
+	atomic_inc(&ftrace_retfunc_active);
+	ret = start_return_tracing();
+	if (ret) {
+		atomic_dec(&ftrace_retfunc_active);
+		goto out;
+	}
 	ftrace_tracing_type = FTRACE_TYPE_RETURN;
 	ftrace_function_return = func;
 	ftrace_startup();
@@ -1530,6 +1602,7 @@ void unregister_ftrace_return(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
+	atomic_dec(&ftrace_retfunc_active);
 	ftrace_function_return = (trace_function_return_t)ftrace_stub;
 	ftrace_shutdown();
 	/* Restore normal tracing type */
@@ -1537,6 +1610,32 @@ void unregister_ftrace_return(void)
 
 	mutex_unlock(&ftrace_sysctl_lock);
 }
+
+/* Allocate a return stack for newly created task */
+void ftrace_retfunc_init_task(struct task_struct *t)
+{
+	if (atomic_read(&ftrace_retfunc_active)) {
+		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+				* sizeof(struct ftrace_ret_stack),
+				GFP_KERNEL);
+		if (!t->ret_stack)
+			return;
+		t->curr_ret_stack = -1;
+		atomic_set(&t->trace_overrun, 0);
+	} else
+		t->ret_stack = NULL;
+}
+
+void ftrace_retfunc_exit_task(struct task_struct *t)
+{
+	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
+
+	t->ret_stack = NULL;
+	/* NULL must become visible to IRQs before we free it: */
+	barrier();
+
+	kfree(ret_stack);
+}
 #endif
 
 
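A note on the allocation scheme added above: alloc_retstack_tasklist() cannot call a sleeping kmalloc() while holding tasklist_lock, so it allocates a batch of return stacks first, hands them out under the lock, and returns -EAGAIN so the caller keeps retrying until every task is covered. Here is a standalone user-space sketch of that batch-and-retry pattern; the sizes and the task array are invented stand-ins, not kernel code:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BATCH	4	/* stand-in for FTRACE_RETSTACK_ALLOC_SIZE */
	#define NTASKS	10	/* pretend task list */

	static void *task_stack[NTASKS];	/* NULL == task not covered yet */

	/* Hand out up to BATCH preallocated buffers; -EAGAIN if tasks remain. */
	static int alloc_batch(void **list)
	{
		int i, used = 0;

		/* Allocate first: in the kernel this may sleep, so it must
		 * happen before tasklist_lock is taken. */
		for (i = 0; i < BATCH; i++) {
			list[i] = malloc(64);
			if (!list[i])
				goto free_rest;
		}

		/* In the kernel this walk runs under read_lock(&tasklist_lock). */
		for (i = 0; i < NTASKS; i++) {
			if (used == BATCH)
				return -EAGAIN;	/* batch exhausted, tasks remain */
			if (!task_stack[i])
				task_stack[i] = list[used++];
		}

		/* Done: free the leftovers from this batch. */
		while (used < BATCH)
			free(list[used++]);
		return 0;

	free_rest:
		while (i-- > 0)
			free(list[i]);
		return -ENOMEM;
	}

	int main(void)
	{
		void *list[BATCH];
		int i, ret;

		do {
			ret = alloc_batch(list);	/* mirrors start_return_tracing() */
		} while (ret == -EAGAIN);
		printf("result: %d\n", ret);

		for (i = 0; i < NTASKS; i++)
			free(task_stack[i]);
		return ret ? 1 : 0;
	}

The kernel version behaves the same way: -ENOMEM aborts the whole registration, while -EAGAIN just means "run another batch".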
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 85ced143c2c4..e206951603c1 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -18,8 +18,46 @@
 
 #include "trace.h"
 
-/* Global flag to disable all recording to ring buffers */
-static int ring_buffers_off __read_mostly;
+/*
+ * A fast way to enable or disable all ring buffers is to
+ * call tracing_on or tracing_off. Turning off the ring buffers
+ * prevents all ring buffers from being recorded to.
+ * Turning this switch on, makes it OK to write to the
+ * ring buffer, if the ring buffer is enabled itself.
+ *
+ * There's three layers that must be on in order to write
+ * to the ring buffer.
+ *
+ * 1) This global flag must be set.
+ * 2) The ring buffer must be enabled for recording.
+ * 3) The per cpu buffer must be enabled for recording.
+ *
+ * In case of an anomaly, this global flag has a bit set that
+ * will permantly disable all ring buffers.
+ */
+
+/*
+ * Global flag to disable all recording to ring buffers
+ * This has two bits: ON, DISABLED
+ *
+ *  ON   DISABLED
+ * ---- ----------
+ *   0      0        : ring buffers are off
+ *   1      0        : ring buffers are on
+ *   X      1        : ring buffers are permanently disabled
+ */
+
+enum {
+	RB_BUFFERS_ON_BIT	= 0,
+	RB_BUFFERS_DISABLED_BIT	= 1,
+};
+
+enum {
+	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
+	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
+};
+
+static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
 /**
  * tracing_on - enable all tracing buffers
@@ -29,7 +67,7 @@ static int ring_buffers_off __read_mostly;
  */
 void tracing_on(void)
 {
-	ring_buffers_off = 0;
+	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
 
 /**
@@ -42,7 +80,18 @@ void tracing_on(void)
  */
 void tracing_off(void)
 {
-	ring_buffers_off = 1;
+	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
+}
+
+/**
+ * tracing_off_permanent - permanently disable ring buffers
+ *
+ * This function, once called, will disable all ring buffers
+ * permanenty.
+ */
+void tracing_off_permanent(void)
+{
+	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 }
 
 #include "trace.h"
@@ -1185,7 +1234,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	int cpu, resched;
 
-	if (ring_buffers_off)
+	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
 	if (atomic_read(&buffer->record_disabled))
@@ -1297,7 +1346,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	int ret = -EBUSY;
 	int cpu, resched;
 
-	if (ring_buffers_off)
+	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
 	if (atomic_read(&buffer->record_disabled))
@@ -2178,12 +2227,14 @@ static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
-	int *p = filp->private_data;
+	long *p = filp->private_data;
 	char buf[64];
 	int r;
 
-	/* !ring_buffers_off == tracing_on */
-	r = sprintf(buf, "%d\n", !*p);
+	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
+		r = sprintf(buf, "permanently disabled\n");
+	else
+		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
@@ -2192,7 +2243,7 @@ static ssize_t
 rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
-	int *p = filp->private_data;
+	long *p = filp->private_data;
 	char buf[64];
 	long val;
 	int ret;
@@ -2209,8 +2260,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 	if (ret < 0)
 		return ret;
 
-	/* !ring_buffers_off == tracing_on */
-	*p = !val;
+	if (val)
+		set_bit(RB_BUFFERS_ON_BIT, p);
+	else
+		clear_bit(RB_BUFFERS_ON_BIT, p);
 
 	(*ppos)++;
 
@@ -2232,7 +2285,7 @@ static __init int rb_init_debugfs(void)
 	d_tracer = tracing_init_dentry();
 
 	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
-				    &ring_buffers_off, &rb_simple_fops);
+				    &ring_buffer_flags, &rb_simple_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs 'tracing_on' entry\n");
 
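The point of the two-bit encoding above: the writer fast path only checks ring_buffer_flags != RB_BUFFERS_ON, so once the DISABLED bit is set the word can never equal RB_BUFFERS_ON again and no later tracing_on() call can re-enable recording. A standalone sketch of that property (plain C; trivial bit ops stand in for the kernel's atomic set_bit/clear_bit):

	#include <stdio.h>

	enum { RB_BUFFERS_ON_BIT = 0, RB_BUFFERS_DISABLED_BIT = 1 };
	enum {
		RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
		RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
	};

	static long flags = RB_BUFFERS_ON;

	static int may_record(void)
	{
		/* Writers proceed only when ON is set and DISABLED is clear. */
		return flags == RB_BUFFERS_ON;
	}

	int main(void)
	{
		printf("initial:         %d\n", may_record());	/* 1 */

		flags &= ~RB_BUFFERS_ON;			/* tracing_off() */
		printf("after off:       %d\n", may_record());	/* 0 */

		flags |= RB_BUFFERS_ON;				/* tracing_on() */
		printf("after on:        %d\n", may_record());	/* 1 */

		flags |= RB_BUFFERS_DISABLED;	/* tracing_off_permanent() */
		flags |= RB_BUFFERS_ON;		/* tracing_on() can't help now */
		printf("after permanent: %d\n", may_record());	/* 0 */
		return 0;
	}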
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 48d1536f1ca4..a45b59e53fbc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -686,6 +686,21 @@ static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
 
 /**
+ * ftrace_off_permanent - disable all ftrace code permanently
+ *
+ * This should only be called when a serious anomally has
+ * been detected. This will turn off the function tracing,
+ * ring buffers, and other tracing utilites. It takes no
+ * locks and can be called from any context.
+ */
+void ftrace_off_permanent(void)
+{
+	tracing_disabled = 1;
+	ftrace_stop();
+	tracing_off_permanent();
+}
+
+/**
  * tracing_start - quick start of the tracer
  *
  * If tracing is enabled but was stopped by tracing_stop,
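ftrace_off_permanent() chains three independent kill switches (the tracing_disabled flag, the function-tracer stop, and the ring buffer's permanent-disable bit); each is a one-way single-word write, which is why the combination needs no locks and is safe from any context. A standalone sketch of composing one-way flags this way (plain C with C11 atomics; the names are invented analogs, not the kernel's):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Three independent one-way switches, as in ftrace_off_permanent(). */
	static atomic_int tracing_disabled;	/* gates the tracer front end */
	static atomic_int function_stopped;	/* gates function entry hooks */
	static atomic_int rb_disabled;		/* gates ring buffer writers */

	/* Safe from any context: three single-word stores, no locks taken. */
	static void trace_off_permanent(void)
	{
		atomic_store(&tracing_disabled, 1);
		atomic_store(&function_stopped, 1);
		atomic_store(&rb_disabled, 1);
	}

	static int may_trace(void)
	{
		return !atomic_load(&tracing_disabled) &&
		       !atomic_load(&function_stopped) &&
		       !atomic_load(&rb_disabled);
	}

	int main(void)
	{
		printf("before: %d\n", may_trace());	/* 1 */
		trace_off_permanent();
		printf("after:  %d\n", may_trace());	/* 0 */
		return 0;
	}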
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 23f9b02ce967..877ee88e6a74 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -185,12 +185,13 @@ EXPORT_SYMBOL(ftrace_likely_update);
 struct ftrace_pointer {
 	void		*start;
 	void		*stop;
+	int		hit;
 };
 
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct ftrace_pointer *f = m->private;
+	const struct ftrace_pointer *f = m->private;
 	struct ftrace_branch_data *p = v;
 
 	(*pos)++;
@@ -223,13 +224,17 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
+	const struct ftrace_pointer *fp = m->private;
 	struct ftrace_branch_data *p = v;
 	const char *f;
-	unsigned long percent;
+	long percent;
 
 	if (v == (void *)1) {
-		seq_printf(m, " correct incorrect  %% "
-			       "       Function                "
+		if (fp->hit)
+			seq_printf(m, "   miss      hit    %% ");
+		else
+			seq_printf(m, " correct incorrect  %% ");
+		seq_printf(m, "       Function                "
 			       "  File              Line\n"
 			       " ------- ---------  - "
 			       "        --------  "
@@ -243,13 +248,20 @@ static int t_show(struct seq_file *m, void *v)
 		f--;
 	f++;
 
+	/*
+	 * The miss is overlayed on correct, and hit on incorrect.
+	 */
 	if (p->correct) {
 		percent = p->incorrect * 100;
 		percent /= p->correct + p->incorrect;
 	} else
-		percent = p->incorrect ? 100 : 0;
+		percent = p->incorrect ? 100 : -1;
 
-	seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
+	seq_printf(m, "%8lu %8lu ",  p->correct, p->incorrect);
+	if (percent < 0)
+		seq_printf(m, "  X ");
+	else
+		seq_printf(m, "%3ld ", percent);
 	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
 	return 0;
 }
@@ -261,7 +273,7 @@ static struct seq_operations tracing_likely_seq_ops = {
 	.show		= t_show,
 };
 
-static int tracing_likely_open(struct inode *inode, struct file *file)
+static int tracing_branch_open(struct inode *inode, struct file *file)
 {
 	int ret;
 
@@ -274,25 +286,30 @@ static int tracing_likely_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-static struct file_operations tracing_likely_fops = {
-	.open		= tracing_likely_open,
+static const struct file_operations tracing_branch_fops = {
+	.open		= tracing_branch_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 };
 
-extern unsigned long __start_likely_profile[];
-extern unsigned long __stop_likely_profile[];
-extern unsigned long __start_unlikely_profile[];
-extern unsigned long __stop_unlikely_profile[];
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+extern unsigned long __start_branch_profile[];
+extern unsigned long __stop_branch_profile[];
 
-static struct ftrace_pointer ftrace_likely_pos = {
-	.start			= __start_likely_profile,
-	.stop			= __stop_likely_profile,
+static const struct ftrace_pointer ftrace_branch_pos = {
+	.start			= __start_branch_profile,
+	.stop			= __stop_branch_profile,
+	.hit			= 1,
 };
 
-static struct ftrace_pointer ftrace_unlikely_pos = {
-	.start			= __start_unlikely_profile,
-	.stop			= __stop_unlikely_profile,
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+extern unsigned long __start_annotated_branch_profile[];
+extern unsigned long __stop_annotated_branch_profile[];
+
+static const struct ftrace_pointer ftrace_annotated_branch_pos = {
+	.start			= __start_annotated_branch_profile,
+	.stop			= __stop_annotated_branch_profile,
 };
 
 static __init int ftrace_branch_init(void)
@@ -302,18 +319,21 @@ static __init int ftrace_branch_init(void)
 
 	d_tracer = tracing_init_dentry();
 
-	entry = debugfs_create_file("profile_likely", 0444, d_tracer,
-				    &ftrace_likely_pos,
-				    &tracing_likely_fops);
+	entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
+				    (void *)&ftrace_annotated_branch_pos,
+				    &tracing_branch_fops);
 	if (!entry)
-		pr_warning("Could not create debugfs 'profile_likely' entry\n");
+		pr_warning("Could not create debugfs "
+			   "'profile_annotatet_branch' entry\n");
 
-	entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
-				    &ftrace_unlikely_pos,
-				    &tracing_likely_fops);
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+	entry = debugfs_create_file("profile_branch", 0444, d_tracer,
+				    (void *)&ftrace_branch_pos,
+				    &tracing_branch_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs"
-			   " 'profile_unlikely' entry\n");
+			   " 'profile_branch' entry\n");
+#endif
 
 	return 0;
 }
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 433d650eda9f..2a98a206acc2 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -18,12 +18,14 @@ struct header_iter {
 
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
+static unsigned long prev_overruns;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
 	int cpu;
 
 	overrun_detected = false;
+	prev_overruns = 0;
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
@@ -123,16 +125,12 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	int cpu;
 	unsigned long cnt = 0;
-/* FIXME: */
-#if 0
-	for_each_online_cpu(cpu) {
-		cnt += iter->overrun[cpu];
-		iter->overrun[cpu] = 0;
-	}
-#endif
-	(void)cpu;
+	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+	if (over > prev_overruns)
+		cnt = over - prev_overruns;
+	prev_overruns = over;
 	return cnt;
 }
 
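count_overruns() now derives a per-read delta from the ring buffer's monotonically growing overrun total, replacing the per-cpu bookkeeping that had been stubbed out behind #if 0. The same pattern in a standalone sketch (plain C; the totals fed in are invented):

	#include <stdio.h>

	static unsigned long prev_overruns;	/* reset to 0 with the trace data */

	/* Return how many overruns happened since the previous call. */
	static unsigned long count_overruns(unsigned long total)
	{
		unsigned long cnt = 0;

		if (total > prev_overruns)
			cnt = total - prev_overruns;
		prev_overruns = total;	/* remember for the next delta */
		return cnt;
	}

	int main(void)
	{
		/* Pretend ring_buffer_overruns() returned these totals. */
		printf("%lu\n", count_overruns(0));	/* 0 */
		printf("%lu\n", count_overruns(7));	/* 7 new overruns */
		printf("%lu\n", count_overruns(7));	/* 0, nothing new */
		printf("%lu\n", count_overruns(12));	/* 5 new */
		return 0;
	}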