Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpu.c                         |   2
-rw-r--r--	kernel/cpuset.c                      |   2
-rw-r--r--	kernel/fork.c                        |   9
-rw-r--r--	kernel/irq/internals.h               |   2
-rw-r--r--	kernel/irq/manage.c                  |  68
-rw-r--r--	kernel/irq/migration.c               |  11
-rw-r--r--	kernel/irq/proc.c                    |   2
-rw-r--r--	kernel/lockdep.c                     |   5
-rw-r--r--	kernel/panic.c                       |   1
-rw-r--r--	kernel/profile.c                     |   4
-rw-r--r--	kernel/ptrace.c                      |   4
-rw-r--r--	kernel/sched.c                       |   5
-rw-r--r--	kernel/sysctl.c                      |  10
-rw-r--r--	kernel/trace/ftrace.c                |  15
-rw-r--r--	kernel/trace/ring_buffer.c           | 311
-rw-r--r--	kernel/trace/trace.c                 |  16
-rw-r--r--	kernel/trace/trace.h                 |   2
-rw-r--r--	kernel/trace/trace_branch.c          |   4
-rw-r--r--	kernel/trace/trace_functions_graph.c | 168
-rw-r--r--	kernel/trace/trace_stack.c           |  13
20 files changed, 502 insertions(+), 152 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5a732c5ef08b..8ea32e8d68b0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -462,7 +462,7 @@ out:
  * It must be called by the arch code on the new cpu, before the new cpu
  * enables interrupts and before the "boot" cpu returns from __cpu_up().
  */
-void notify_cpu_starting(unsigned int cpu)
+void __cpuinit notify_cpu_starting(unsigned int cpu)
 {
 	unsigned long val = CPU_STARTING;
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index da7ff6137f37..96c0ba13b8cd 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -585,7 +585,7 @@ static int generate_sched_domains(cpumask_t **domains,
 	int i, j, k;		/* indices for partition finding loops */
 	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
-	int ndoms;		/* number of sched domains in result */
+	int ndoms = 0;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
 	doms = NULL;
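
generate_sched_domains() gains "ndoms = 0" above because an early-exit path can reach the function's return before ndoms is ever assigned, handing the caller an uninitialized domain count. A hedged, stand-alone sketch of the hazard (illustrative names, not the kernel's):

	/* Illustrative: why the initializer matters. */
	static int build_domains(void **doms_out, int want)
	{
		int ndoms = 0;	/* without this, the early return leaks junk */
		void *doms = NULL;

		if (want <= 0)
			goto done;	/* early exit: ndoms never assigned below */

		ndoms = want;
		/* ... allocate and fill doms ... */
	done:
		*doms_out = doms;
		return ndoms;
	}
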
diff --git a/kernel/fork.c b/kernel/fork.c
index 5f82a999c032..7407ab319875 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1137,6 +1137,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		}
 	}
 
+	ftrace_graph_init_task(p);
+
 	p->pid = pid_nr(pid);
 	p->tgid = p->pid;
 	if (clone_flags & CLONE_THREAD)
@@ -1145,7 +1147,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (current->nsproxy != p->nsproxy) {
 		retval = ns_cgroup_clone(p, pid);
 		if (retval)
-			goto bad_fork_free_pid;
+			goto bad_fork_free_graph;
 	}
 
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1238,7 +1240,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_pid;
+		goto bad_fork_free_graph;
 	}
 
 	if (clone_flags & CLONE_THREAD) {
@@ -1271,11 +1273,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
-	ftrace_graph_init_task(p);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
 	return p;
 
+bad_fork_free_graph:
+	ftrace_graph_exit_task(p);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
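
The fork.c hunks above move ftrace_graph_init_task() ahead of PID setup and give the later failure paths a matching bad_fork_free_graph label, so the graph return stack is torn down again on any error. A minimal stand-alone sketch of that goto-unwind idiom (names are illustrative, not the kernel's):

	#include <stdio.h>

	static int setup_graph(void) { puts("graph stack allocated"); return 0; }
	static void teardown_graph(void) { puts("graph stack freed"); }
	static int setup_pid(void) { puts("pid setup"); return -1; /* simulated failure */ }

	static int copy_process_sketch(void)
	{
		int ret;

		ret = setup_graph();		/* like ftrace_graph_init_task(p) */
		if (ret)
			goto out;

		ret = setup_pid();		/* any later step that can fail */
		if (ret)
			goto free_graph;	/* like bad_fork_free_graph */

		return 0;

	free_graph:
		teardown_graph();		/* like ftrace_graph_exit_task(p) */
	out:
		return ret;
	}

	int main(void)
	{
		return copy_process_sketch() ? 1 : 0;
	}
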
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c9767e641980..64c1c7253dae 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -25,6 +25,8 @@ static inline void unregister_handler_proc(unsigned int irq,
 			       struct irqaction *action) { }
 #endif
 
+extern int irq_select_affinity_usr(unsigned int irq);
+
 /*
  * Debugging printout:
  */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c498a1b8c621..801addda3c43 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -82,24 +82,27 @@ int irq_can_set_affinity(unsigned int irq)
 int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
 
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
+	spin_lock_irqsave(&desc->lock, flags);
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&desc->lock, flags);
 		desc->affinity = cpumask;
 		desc->chip->set_affinity(irq, cpumask);
-		spin_unlock_irqrestore(&desc->lock, flags);
-	} else
-		set_pending_irq(irq, cpumask);
+	} else {
+		desc->status |= IRQ_MOVE_PENDING;
+		desc->pending_mask = cpumask;
+	}
 #else
 	desc->affinity = cpumask;
 	desc->chip->set_affinity(irq, cpumask);
 #endif
+	desc->status |= IRQ_AFFINITY_SET;
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -107,24 +110,59 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int irq_select_affinity(unsigned int irq)
+int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	cpumask_t mask;
-	struct irq_desc *desc;
 
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
 	cpus_and(mask, cpu_online_map, irq_default_affinity);
 
-	desc = irq_to_desc(irq);
+	/*
+	 * Preserve an userspace affinity setup, but make sure that
+	 * one of the targets is online.
+	 */
+	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
+		if (cpus_intersects(desc->affinity, cpu_online_map))
+			mask = desc->affinity;
+		else
+			desc->status &= ~IRQ_AFFINITY_SET;
+	}
+
 	desc->affinity = mask;
 	desc->chip->set_affinity(irq, mask);
 
 	return 0;
 }
+#else
+static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+{
+	return irq_select_affinity(irq);
+}
 #endif
 
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = do_irq_select_affinity(irq, desc);
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	return ret;
+}
+
+#else
+static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+{
+	return 0;
+}
 #endif
 
 /**
@@ -327,7 +365,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?
 		 */
-		pr_warning("No set_type function for IRQ %d (%s)\n", irq,
+		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 			   chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
@@ -445,8 +483,12 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 		/* Undo nested disables: */
 		desc->depth = 1;
 
+		/* Exclude IRQ from balancing if requested */
+		if (new->flags & IRQF_NOBALANCING)
+			desc->status |= IRQ_NO_BALANCING;
+
 		/* Set default affinity mask once everything is setup */
-		irq_select_affinity(irq);
+		do_irq_select_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -459,10 +501,6 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 	*p = new;
 
-	/* Exclude IRQ from balancing */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
-
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
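
The manage.c changes above consolidate the locking in irq_set_affinity(): one spin_lock_irqsave() now covers both the immediate update and the deferred (IRQ_MOVE_PENDING) branch, which also lets IRQ_AFFINITY_SET be recorded under desc->lock. A compressed sketch of the resulting shape, where can_move_now() is a made-up helper standing in for the IRQ_MOVE_PCNTXT/IRQ_DISABLED test:

	/* Sketch only: mirrors the control flow of the patched
	 * irq_set_affinity(); can_move_now() is hypothetical. */
	int irq_set_affinity_sketch(unsigned int irq, cpumask_t cpumask)
	{
		struct irq_desc *desc = irq_to_desc(irq);
		unsigned long flags;

		if (!desc->chip->set_affinity)
			return -EINVAL;

		spin_lock_irqsave(&desc->lock, flags);
		if (can_move_now(desc)) {
			/* apply immediately */
			desc->affinity = cpumask;
			desc->chip->set_affinity(irq, cpumask);
		} else {
			/* defer: this was the now-removed set_pending_irq() */
			desc->status |= IRQ_MOVE_PENDING;
			desc->pending_mask = cpumask;
		}
		desc->status |= IRQ_AFFINITY_SET;
		spin_unlock_irqrestore(&desc->lock, flags);
		return 0;
	}
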
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 90b920d3f52b..9db681d95814 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,17 +1,6 @@
 
 #include <linux/irq.h>
 
-void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	spin_lock_irqsave(&desc->lock, flags);
-	desc->status |= IRQ_MOVE_PENDING;
-	desc->pending_mask = mask;
-	spin_unlock_irqrestore(&desc->lock, flags);
-}
-
 void move_masked_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 4d161c70ba55..d257e7d6a8a4 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -62,7 +62,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
 	if (!cpus_intersects(new_value, cpu_online_map))
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return irq_select_affinity(irq) ? -EINVAL : count;
+		return irq_select_affinity_usr(irq) ? -EINVAL : count;
 
 	irq_set_affinity(irq, new_value);
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06e157119d2b..74b1878b8bb8 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -25,6 +25,7 @@
  * Thanks to Arjan van de Ven for coming up with the initial idea of
  * mapping lock dependencies runtime.
  */
+#define DISABLE_BRANCH_PROFILING
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
@@ -3276,10 +3277,10 @@ void __init lockdep_info(void)
 {
 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
 
-	printk("... MAX_LOCKDEP_SUBCLASSES:    %lu\n", MAX_LOCKDEP_SUBCLASSES);
-	printk("... MAX_LOCK_DEPTH:            %lu\n", MAX_LOCK_DEPTH);
-	printk("... MAX_LOCKDEP_KEYS:          %lu\n", MAX_LOCKDEP_KEYS);
-	printk("... CLASSHASH_SIZE:            %lu\n", CLASSHASH_SIZE);
+	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
+	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
+	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
+	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
diff --git a/kernel/panic.c b/kernel/panic.c
index 6513aac8e992..4d5088355bfe 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -167,6 +167,7 @@ static const struct tnt tnts[] = {
  *  'M' - System experienced a machine check exception.
  *  'B' - System has hit bad_page.
  *  'U' - Userspace-defined naughtiness.
+ *  'D' - Kernel has oopsed before
  *  'A' - ACPI table overridden.
  *  'W' - Taint on warning.
  *  'C' - modules from drivers/staging are loaded.
diff --git a/kernel/profile.c b/kernel/profile.c
index 7f93a5042d3b..60adefb59b5e 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -351,7 +351,7 @@ out:
 	put_cpu();
 }
 
-static int __devinit profile_cpu_callback(struct notifier_block *info,
+static int __cpuinit profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
 	int node, cpu = (unsigned long)__cpu;
@@ -596,7 +596,7 @@ out_cleanup:
 #define create_hash_tables()			({ 0; })
 #endif
 
-int create_proc_profile(void)
+int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
 {
 	struct proc_dir_entry *entry;
 
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1e68e4c39e2c..4c8bcd7dd8e0 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -612,7 +612,7 @@ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
 	return (copied == sizeof(data)) ? 0 : -EIO;
 }
 
-#if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE
+#if defined CONFIG_COMPAT
 #include <linux/compat.h>
 
 int compat_ptrace_request(struct task_struct *child, compat_long_t request,
@@ -709,4 +709,4 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 	unlock_kernel();
 	return ret;
 }
-#endif	/* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */
+#endif	/* CONFIG_COMPAT */
diff --git a/kernel/sched.c b/kernel/sched.c
index 52490bf6b884..7729c4bbc8ba 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1459,9 +1459,10 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
 
-	if (rq->nr_running)
-		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	if (nr_running)
+		rq->avg_load_per_task = rq->load.weight / nr_running;
 	else
 		rq->avg_load_per_task = 0;
 
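
The sched.c hunk reads rq->nr_running exactly once: without ACCESS_ONCE() the compiler may legally reload the field between the zero test and the division, and another CPU can decrement it to zero in that window, producing a divide-by-zero. ACCESS_ONCE(x) is just (*(volatile typeof(x) *)&(x)); a self-contained sketch of the same pattern:

	/* Stand-alone illustration of the ACCESS_ONCE() fix above. */
	#define ACCESS_ONCE_UL(x) (*(volatile unsigned long *)&(x))

	static unsigned long avg_load_sketch(unsigned long *weight,
					     unsigned long *nr_running_shared)
	{
		/* one read: the test and the divide see the same value */
		unsigned long nr_running = ACCESS_ONCE_UL(*nr_running_shared);

		return nr_running ? *weight / nr_running : 0;
	}
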
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 65d4a9ba79e4..c83f566e940a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -176,6 +176,9 @@ extern struct ctl_table random_table[];
 #ifdef CONFIG_INOTIFY_USER
 extern struct ctl_table inotify_table[];
 #endif
+#ifdef CONFIG_EPOLL
+extern struct ctl_table epoll_table[];
+#endif
 
 #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
 int sysctl_legacy_va_layout;
@@ -1335,6 +1338,13 @@ static struct ctl_table fs_table[] = {
 		.child		= inotify_table,
 	},
 #endif
+#ifdef CONFIG_EPOLL
+	{
+		.procname	= "epoll",
+		.mode		= 0555,
+		.child		= epoll_table,
+	},
+#endif
 #endif
 	{
 		.ctl_name	= KERN_SETUID_DUMPABLE,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2e78628443e8..65b9e863056b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1636,11 +1636,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 static atomic_t ftrace_graph_active;
 
+int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+{
+	return 0;
+}
+
 /* The callbacks that hook a function */
 trace_func_graph_ret_t ftrace_graph_return =
 			(trace_func_graph_ret_t)ftrace_stub;
-trace_func_graph_ent_t ftrace_graph_entry =
-			(trace_func_graph_ent_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1738,7 +1742,7 @@ void unregister_ftrace_graph(void)
 
 	atomic_dec(&ftrace_graph_active);
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
-	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
+	ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 
 	mutex_unlock(&ftrace_sysctl_lock);
@@ -1769,5 +1773,10 @@ void ftrace_graph_exit_task(struct task_struct *t)
 
 	kfree(ret_stack);
 }
+
+void ftrace_graph_stop(void)
+{
+	ftrace_stop();
+}
 #endif
 
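
The ftrace.c hunks replace the old trick of casting ftrace_stub to trace_func_graph_ent_t with a real stub: the entry callback now returns int (whether to record the function), and calling a void function through an int-returning pointer type is undefined behavior. A sketch of the pattern, outside the kernel:

	/* Illustrative only: why a typed stub beats a cast stub. */
	struct graph_ent;			/* opaque here */
	typedef int (*graph_ent_hook)(struct graph_ent *trace);

	static int graph_entry_stub(struct graph_ent *trace)
	{
		(void)trace;
		return 0;	/* 0 == do not trace this entry */
	}

	/* default hook: a real function of the right type, no cast */
	static graph_ent_hook entry_hook = graph_entry_stub;
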
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e206951603c1..7f69cfeaadf7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -195,20 +195,24 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
 #define TS_DELTA_TEST	(~TS_MASK)
 
-/*
- * This hack stolen from mm/slob.c.
- * We can store per page timing information in the page frame of the page.
- * Thanks to Peter Zijlstra for suggesting this idea.
- */
-struct buffer_page {
+struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
-	local_t		 write;		/* index for next write */
 	local_t		 commit;	/* write commited index */
+	unsigned char	 data[];	/* data of buffer page */
+};
+
+struct buffer_page {
+	local_t		 write;		/* index for next write */
 	unsigned	 read;		/* index for next read */
 	struct list_head list;		/* list of free pages */
-	void *page;			/* Actual data page */
+	struct buffer_data_page *page;	/* Actual data page */
 };
 
+static void rb_init_page(struct buffer_data_page *bpage)
+{
+	local_set(&bpage->commit, 0);
+}
+
 /*
  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
  * this issue out.
@@ -230,7 +234,7 @@ static inline int test_time_stamp(u64 delta)
 	return 0;
 }
 
-#define BUF_PAGE_SIZE PAGE_SIZE
+#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
 
 /*
  * head_page == tail_page && head == tail then buffer is empty.
@@ -294,19 +298,19 @@ struct ring_buffer_iter {
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct list_head *head = &cpu_buffer->pages;
-	struct buffer_page *page, *tmp;
+	struct buffer_page *bpage, *tmp;
 
 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
 		return -1;
 	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
 		return -1;
 
-	list_for_each_entry_safe(page, tmp, head, list) {
+	list_for_each_entry_safe(bpage, tmp, head, list) {
 		if (RB_WARN_ON(cpu_buffer,
-			       page->list.next->prev != &page->list))
+			       bpage->list.next->prev != &bpage->list))
 			return -1;
 		if (RB_WARN_ON(cpu_buffer,
-			       page->list.prev->next != &page->list))
+			       bpage->list.prev->next != &bpage->list))
 			return -1;
 	}
 
@@ -317,22 +321,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 			     unsigned nr_pages)
 {
 	struct list_head *head = &cpu_buffer->pages;
-	struct buffer_page *page, *tmp;
+	struct buffer_page *bpage, *tmp;
 	unsigned long addr;
 	LIST_HEAD(pages);
 	unsigned i;
 
 	for (i = 0; i < nr_pages; i++) {
-		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
-		if (!page)
+		if (!bpage)
 			goto free_pages;
-		list_add(&page->list, &pages);
+		list_add(&bpage->list, &pages);
 
 		addr = __get_free_page(GFP_KERNEL);
 		if (!addr)
 			goto free_pages;
-		page->page = (void *)addr;
+		bpage->page = (void *)addr;
+		rb_init_page(bpage->page);
 	}
 
 	list_splice(&pages, head);
@@ -342,9 +347,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	return 0;
 
  free_pages:
-	list_for_each_entry_safe(page, tmp, &pages, list) {
-		list_del_init(&page->list);
-		free_buffer_page(page);
+	list_for_each_entry_safe(bpage, tmp, &pages, list) {
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
 	}
 	return -ENOMEM;
 }
@@ -353,7 +358,7 @@ static struct ring_buffer_per_cpu *
 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	struct buffer_page *page;
+	struct buffer_page *bpage;
 	unsigned long addr;
 	int ret;
 
@@ -368,16 +373,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
-	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
-	if (!page)
+	if (!bpage)
 		goto fail_free_buffer;
 
-	cpu_buffer->reader_page = page;
+	cpu_buffer->reader_page = bpage;
 	addr = __get_free_page(GFP_KERNEL);
 	if (!addr)
 		goto fail_free_reader;
-	page->page = (void *)addr;
+	bpage->page = (void *)addr;
+	rb_init_page(bpage->page);
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 
@@ -402,14 +408,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct list_head *head = &cpu_buffer->pages;
-	struct buffer_page *page, *tmp;
+	struct buffer_page *bpage, *tmp;
 
 	list_del_init(&cpu_buffer->reader_page->list);
 	free_buffer_page(cpu_buffer->reader_page);
 
-	list_for_each_entry_safe(page, tmp, head, list) {
-		list_del_init(&page->list);
-		free_buffer_page(page);
+	list_for_each_entry_safe(bpage, tmp, head, list) {
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
 	}
 	kfree(cpu_buffer);
 }
@@ -506,7 +512,7 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 static void
 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 {
-	struct buffer_page *page;
+	struct buffer_page *bpage;
 	struct list_head *p;
 	unsigned i;
 
@@ -517,9 +523,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
 			return;
 		p = cpu_buffer->pages.next;
-		page = list_entry(p, struct buffer_page, list);
-		list_del_init(&page->list);
-		free_buffer_page(page);
+		bpage = list_entry(p, struct buffer_page, list);
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
 		return;
@@ -536,7 +542,7 @@ static void
 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		struct list_head *pages, unsigned nr_pages)
 {
-	struct buffer_page *page;
+	struct buffer_page *bpage;
 	struct list_head *p;
 	unsigned i;
 
@@ -547,9 +553,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
 			return;
 		p = pages->next;
-		page = list_entry(p, struct buffer_page, list);
-		list_del_init(&page->list);
-		list_add_tail(&page->list, &cpu_buffer->pages);
+		bpage = list_entry(p, struct buffer_page, list);
+		list_del_init(&bpage->list);
+		list_add_tail(&bpage->list, &cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
 
@@ -576,7 +582,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned nr_pages, rm_pages, new_pages;
-	struct buffer_page *page, *tmp;
+	struct buffer_page *bpage, *tmp;
 	unsigned long buffer_size;
 	unsigned long addr;
 	LIST_HEAD(pages);
@@ -637,16 +643,17 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
-			page = kzalloc_node(ALIGN(sizeof(*page),
+			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
 						  cache_line_size()),
 					    GFP_KERNEL, cpu_to_node(cpu));
-			if (!page)
+			if (!bpage)
 				goto free_pages;
-			list_add(&page->list, &pages);
+			list_add(&bpage->list, &pages);
 			addr = __get_free_page(GFP_KERNEL);
 			if (!addr)
 				goto free_pages;
-			page->page = (void *)addr;
+			bpage->page = (void *)addr;
+			rb_init_page(bpage->page);
 		}
 	}
 
@@ -667,9 +674,9 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	return size;
 
  free_pages:
-	list_for_each_entry_safe(page, tmp, &pages, list) {
-		list_del_init(&page->list);
-		free_buffer_page(page);
+	list_for_each_entry_safe(bpage, tmp, &pages, list) {
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
 	}
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
@@ -680,9 +687,15 @@ static inline int rb_null_event(struct ring_buffer_event *event)
 	return event->type == RINGBUF_TYPE_PADDING;
 }
 
-static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
+static inline void *
+__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
 {
-	return page->page + index;
+	return bpage->data + index;
+}
+
+static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
+{
+	return bpage->page->data + index;
 }
 
 static inline struct ring_buffer_event *
@@ -712,7 +725,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage)
 
 static inline unsigned rb_page_commit(struct buffer_page *bpage)
 {
-	return local_read(&bpage->commit);
+	return local_read(&bpage->page->commit);
 }
 
 /* Size is determined by what has been commited */
@@ -758,14 +771,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
-			       struct buffer_page **page)
+			       struct buffer_page **bpage)
 {
-	struct list_head *p = (*page)->list.next;
+	struct list_head *p = (*bpage)->list.next;
 
 	if (p == &cpu_buffer->pages)
 		p = p->next;
 
-	*page = list_entry(p, struct buffer_page, list);
+	*bpage = list_entry(p, struct buffer_page, list);
 }
 
 static inline unsigned
804 if (RB_WARN_ON(cpu_buffer, 817 if (RB_WARN_ON(cpu_buffer,
805 cpu_buffer->commit_page == cpu_buffer->tail_page)) 818 cpu_buffer->commit_page == cpu_buffer->tail_page))
806 return; 819 return;
807 cpu_buffer->commit_page->commit = 820 cpu_buffer->commit_page->page->commit =
808 cpu_buffer->commit_page->write; 821 cpu_buffer->commit_page->write;
809 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 822 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
810 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; 823 cpu_buffer->write_stamp =
824 cpu_buffer->commit_page->page->time_stamp;
811 } 825 }
812 826
813 /* Now set the commit to the event's index */ 827 /* Now set the commit to the event's index */
814 local_set(&cpu_buffer->commit_page->commit, index); 828 local_set(&cpu_buffer->commit_page->page->commit, index);
815} 829}
816 830
817static inline void 831static inline void
@@ -826,16 +840,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
826 * assign the commit to the tail. 840 * assign the commit to the tail.
827 */ 841 */
828 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 842 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
829 cpu_buffer->commit_page->commit = 843 cpu_buffer->commit_page->page->commit =
830 cpu_buffer->commit_page->write; 844 cpu_buffer->commit_page->write;
831 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 845 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
832 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; 846 cpu_buffer->write_stamp =
847 cpu_buffer->commit_page->page->time_stamp;
833 /* add barrier to keep gcc from optimizing too much */ 848 /* add barrier to keep gcc from optimizing too much */
834 barrier(); 849 barrier();
835 } 850 }
836 while (rb_commit_index(cpu_buffer) != 851 while (rb_commit_index(cpu_buffer) !=
837 rb_page_write(cpu_buffer->commit_page)) { 852 rb_page_write(cpu_buffer->commit_page)) {
838 cpu_buffer->commit_page->commit = 853 cpu_buffer->commit_page->page->commit =
839 cpu_buffer->commit_page->write; 854 cpu_buffer->commit_page->write;
840 barrier(); 855 barrier();
841 } 856 }
@@ -843,7 +858,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
+	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
 	cpu_buffer->reader_page->read = 0;
 }
 
@@ -862,7 +877,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter)
 	else
 		rb_inc_page(cpu_buffer, &iter->head_page);
 
-	iter->read_stamp = iter->head_page->time_stamp;
+	iter->read_stamp = iter->head_page->page->time_stamp;
 	iter->head = 0;
 }
 
@@ -998,12 +1013,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 */
 	if (tail_page == cpu_buffer->tail_page) {
 		local_set(&next_page->write, 0);
-		local_set(&next_page->commit, 0);
+		local_set(&next_page->page->commit, 0);
 		cpu_buffer->tail_page = next_page;
 
 		/* reread the time stamp */
 		*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
-		cpu_buffer->tail_page->time_stamp = *ts;
+		cpu_buffer->tail_page->page->time_stamp = *ts;
 	}
 
 	/*
@@ -1048,7 +1063,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 * this page's time stamp.
 	 */
 	if (!tail && rb_is_commit(cpu_buffer, event))
-		cpu_buffer->commit_page->time_stamp = *ts;
+		cpu_buffer->commit_page->page->time_stamp = *ts;
 
 	return event;
 
@@ -1099,7 +1114,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		event->time_delta = *delta & TS_MASK;
 		event->array[0] = *delta >> TS_SHIFT;
 	} else {
-		cpu_buffer->commit_page->time_stamp = *ts;
+		cpu_buffer->commit_page->page->time_stamp = *ts;
 		event->time_delta = 0;
 		event->array[0] = 0;
 	}
@@ -1552,7 +1567,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 	if (iter->head)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
-		iter->read_stamp = iter->head_page->time_stamp;
+		iter->read_stamp = iter->head_page->page->time_stamp;
 }
 
 /**
@@ -1696,7 +1711,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
 	local_set(&cpu_buffer->reader_page->write, 0);
-	local_set(&cpu_buffer->reader_page->commit, 0);
+	local_set(&cpu_buffer->reader_page->page->commit, 0);
 
 	/* Make the reader page now replace the head */
 	reader->list.prev->next = &cpu_buffer->reader_page->list;
@@ -2088,7 +2103,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->head_page
 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
 	local_set(&cpu_buffer->head_page->write, 0);
-	local_set(&cpu_buffer->head_page->commit, 0);
+	local_set(&cpu_buffer->head_page->page->commit, 0);
 
 	cpu_buffer->head_page->read = 0;
 
@@ -2097,7 +2112,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 	local_set(&cpu_buffer->reader_page->write, 0);
-	local_set(&cpu_buffer->reader_page->commit, 0);
+	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->read = 0;
 
 	cpu_buffer->overrun = 0;
@@ -2223,6 +2238,166 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	return 0;
 }
 
+static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
+			      struct buffer_data_page *bpage)
+{
+	struct ring_buffer_event *event;
+	unsigned long head;
+
+	__raw_spin_lock(&cpu_buffer->lock);
+	for (head = 0; head < local_read(&bpage->commit);
+	     head += rb_event_length(event)) {
+
+		event = __rb_data_page_index(bpage, head);
+		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
+			return;
+		/* Only count data entries */
+		if (event->type != RINGBUF_TYPE_DATA)
+			continue;
+		cpu_buffer->entries--;
+	}
+	__raw_spin_unlock(&cpu_buffer->lock);
+}
+
+/**
+ * ring_buffer_alloc_read_page - allocate a page to read from buffer
+ * @buffer: the buffer to allocate for.
+ *
+ * This function is used in conjunction with ring_buffer_read_page.
+ * When reading a full page from the ring buffer, these functions
+ * can be used to speed up the process. The calling function should
+ * allocate a few pages first with this function. Then when it
+ * needs to get pages from the ring buffer, it passes the result
+ * of this function into ring_buffer_read_page, which will swap
+ * the page that was allocated, with the read page of the buffer.
+ *
+ * Returns:
+ *  The page allocated, or NULL on error.
+ */
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+{
+	unsigned long addr;
+	struct buffer_data_page *bpage;
+
+	addr = __get_free_page(GFP_KERNEL);
+	if (!addr)
+		return NULL;
+
+	bpage = (void *)addr;
+
+	return bpage;
+}
+
+/**
+ * ring_buffer_free_read_page - free an allocated read page
+ * @buffer: the buffer the page was allocated for
+ * @data: the page to free
+ *
+ * Free a page allocated from ring_buffer_alloc_read_page.
+ */
+void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
+{
+	free_page((unsigned long)data);
+}
+
+/**
+ * ring_buffer_read_page - extract a page from the ring buffer
+ * @buffer: buffer to extract from
+ * @data_page: the page to use allocated from ring_buffer_alloc_read_page
+ * @cpu: the cpu of the buffer to extract
+ * @full: should the extraction only happen when the page is full.
+ *
+ * This function will pull out a page from the ring buffer and consume it.
+ * @data_page must be the address of the variable that was returned
+ * from ring_buffer_alloc_read_page. This is because the page might be used
+ * to swap with a page in the ring buffer.
+ *
+ * for example:
+ *	rpage = ring_buffer_alloc_read_page(buffer);
+ *	if (!rpage)
+ *		return error;
+ *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
+ *	if (ret)
+ *		process_page(rpage);
+ *
+ * When @full is set, the function will not return true unless
+ * the writer is off the reader page.
+ *
+ * Note: it is up to the calling functions to handle sleeps and wakeups.
+ *  The ring buffer can be used anywhere in the kernel and can not
+ *  blindly call wake_up. The layer that uses the ring buffer must be
+ *  responsible for that.
+ *
+ * Returns:
+ *  1 if data has been transferred
+ *  0 if no data has been transferred.
+ */
+int ring_buffer_read_page(struct ring_buffer *buffer,
+			  void **data_page, int cpu, int full)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct ring_buffer_event *event;
+	struct buffer_data_page *bpage;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!data_page)
+		return 0;
+
+	bpage = *data_page;
+	if (!bpage)
+		return 0;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+	/*
+	 * rb_buffer_peek will get the next ring buffer if
+	 * the current reader page is empty.
+	 */
+	event = rb_buffer_peek(buffer, cpu, NULL);
+	if (!event)
+		goto out;
+
+	/* check for data */
+	if (!local_read(&cpu_buffer->reader_page->page->commit))
+		goto out;
+	/*
+	 * If the writer is already off of the read page, then simply
+	 * switch the read page with the given page. Otherwise
+	 * we need to copy the data from the reader to the writer.
+	 */
+	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
+		unsigned int read = cpu_buffer->reader_page->read;
+
+		if (full)
+			goto out;
+		/* The writer is still on the reader page, we must copy */
+		bpage = cpu_buffer->reader_page->page;
+		memcpy(bpage->data,
+		       cpu_buffer->reader_page->page->data + read,
+		       local_read(&bpage->commit) - read);
+
+		/* consume what was read */
+		cpu_buffer->reader_page->read += read;
+
+	} else {
+		/* swap the pages */
+		rb_init_page(bpage);
+		bpage = cpu_buffer->reader_page->page;
+		cpu_buffer->reader_page->page = *data_page;
+		cpu_buffer->reader_page->read = 0;
+		*data_page = bpage;
+	}
+	ret = 1;
+
+	/* update the entry counter */
+	rb_remove_entries(cpu_buffer, bpage);
+ out:
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return ret;
+}
+
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
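
A hedged usage sketch for the read-page API added above, following the pattern in the ring_buffer_read_page() kernel-doc; process_page() is a hypothetical consumer, not a kernel function:

	void drain_cpu_buffer(struct ring_buffer *buffer, int cpu)
	{
		/* the page may be swapped with the reader page on each call */
		void *rpage = ring_buffer_alloc_read_page(buffer);

		if (!rpage)
			return;
		while (ring_buffer_read_page(buffer, &rpage, cpu, 0))
			process_page(rpage);	/* hypothetical */
		ring_buffer_free_read_page(buffer, rpage);
	}
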
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 91887a280ab9..8b6409a62b54 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1200,7 +1200,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void trace_graph_entry(struct ftrace_graph_ent *trace)
+int trace_graph_entry(struct ftrace_graph_ent *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1209,7 +1209,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -1218,7 +1218,9 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
 		__trace_graph_entry(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
+
+	return 1;
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1230,7 +1232,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 	int cpu;
 	int pc;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -1239,7 +1241,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -2645,7 +2647,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		goto err_unlock;
 
-	raw_local_irq_disable();
+	local_irq_disable();
 	__raw_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
@@ -2662,7 +2664,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		}
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
-	raw_local_irq_enable();
+	local_irq_enable();
 
 	tracing_cpumask = tracing_cpumask_new;
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f96f4e787ff3..0565ae9a2210 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -412,7 +412,7 @@ void trace_function(struct trace_array *tr,
 		    unsigned long flags, int pc);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
-void trace_graph_entry(struct ftrace_graph_ent *trace);
+int trace_graph_entry(struct ftrace_graph_ent *trace);
 void trace_bts(struct trace_array *tr,
 	       unsigned long from,
 	       unsigned long to);
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index bc972753568d..6c00feb3bac7 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -42,7 +42,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	if (unlikely(!tr))
 		return;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
@@ -74,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 static inline
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 894b50bca313..c66578f2fdc2 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -19,6 +19,7 @@
 #define TRACE_GRAPH_PRINT_OVERRUN	0x1
 #define TRACE_GRAPH_PRINT_CPU		0x2
 #define TRACE_GRAPH_PRINT_OVERHEAD	0x4
+#define TRACE_GRAPH_PRINT_PROC		0x8
 
 static struct tracer_opt trace_opts[] = {
 	/* Display overruns ? */
@@ -27,11 +28,13 @@ static struct tracer_opt trace_opts[] = {
 	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
 	/* Display Overhead ? */
 	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
+	/* Display proc name/pid */
+	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
 	{ } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
-	/* Don't display overruns by default */
+	/* Don't display overruns and proc by default */
 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
 	.opts = trace_opts
 };
@@ -104,23 +107,63 @@ print_graph_cpu(struct trace_seq *s, int cpu)
 	return TRACE_TYPE_HANDLED;
 }
 
+#define TRACE_GRAPH_PROCINFO_LENGTH	14
+
+static enum print_line_t
+print_graph_proc(struct trace_seq *s, pid_t pid)
+{
+	int i;
+	int ret;
+	int len;
+	char comm[8];
+	int spaces = 0;
+	/* sign + log10(MAX_INT) + '\0' */
+	char pid_str[11];
+
+	strncpy(comm, trace_find_cmdline(pid), 7);
+	comm[7] = '\0';
+	sprintf(pid_str, "%d", pid);
+
+	/* 1 stands for the "-" character */
+	len = strlen(comm) + strlen(pid_str) + 1;
+
+	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
+		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
+
+	/* First spaces to align center */
+	for (i = 0; i < spaces / 2; i++) {
+		ret = trace_seq_printf(s, " ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Last spaces to align center */
+	for (i = 0; i < spaces - (spaces / 2); i++) {
+		ret = trace_seq_printf(s, " ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+	return TRACE_TYPE_HANDLED;
+}
+
 
 /* If the pid changed since the last trace, output this event */
-static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
+static enum print_line_t
+verif_pid(struct trace_seq *s, pid_t pid, int cpu)
 {
-	char *comm, *prev_comm;
 	pid_t prev_pid;
 	int ret;
 
 	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
-		return 1;
+		return TRACE_TYPE_HANDLED;
 
 	prev_pid = last_pid[cpu];
 	last_pid[cpu] = pid;
 
-	comm = trace_find_cmdline(pid);
-	prev_comm = trace_find_cmdline(prev_pid);
-
 /*
  * Context-switch trace line:
 
@@ -130,11 +173,31 @@ static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
 
  */
 	ret = trace_seq_printf(s,
-		" ------------------------------------------\n");
-	ret += trace_seq_printf(s, " | %d) %s-%d => %s-%d\n",
-				  cpu, prev_comm, prev_pid, comm, pid);
-	ret += trace_seq_printf(s,
-		" ------------------------------------------\n\n");
+		"\n ------------------------------------------\n |");
+	if (!ret)
+		TRACE_TYPE_PARTIAL_LINE;
+
+	ret = print_graph_cpu(s, cpu);
+	if (ret == TRACE_TYPE_PARTIAL_LINE)
+		TRACE_TYPE_PARTIAL_LINE;
+
+	ret = print_graph_proc(s, prev_pid);
+	if (ret == TRACE_TYPE_PARTIAL_LINE)
+		TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, " => ");
+	if (!ret)
+		TRACE_TYPE_PARTIAL_LINE;
+
+	ret = print_graph_proc(s, pid);
+	if (ret == TRACE_TYPE_PARTIAL_LINE)
+		TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s,
+		"\n ------------------------------------------\n\n");
+	if (!ret)
+		TRACE_TYPE_PARTIAL_LINE;
+
 	return ret;
 }
 
@@ -169,11 +232,50 @@ trace_branch_is_leaf(struct trace_iterator *iter,
 }
 
 
-static inline int
+static enum print_line_t
 print_graph_duration(unsigned long long duration, struct trace_seq *s)
 {
 	unsigned long nsecs_rem = do_div(duration, 1000);
-	return trace_seq_printf(s, "%4llu.%3lu us | ", duration, nsecs_rem);
+	/* log10(ULONG_MAX) + '\0' */
+	char msecs_str[21];
+	char nsecs_str[5];
+	int ret, len;
+	int i;
+
+	sprintf(msecs_str, "%lu", (unsigned long) duration);
+
+	/* Print msecs */
+	ret = trace_seq_printf(s, msecs_str);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	len = strlen(msecs_str);
+
+	/* Print nsecs (we don't want to exceed 7 numbers) */
+	if (len < 7) {
+		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
+		ret = trace_seq_printf(s, ".%s", nsecs_str);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+		len += strlen(nsecs_str);
+	}
+
+	ret = trace_seq_printf(s, " us ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Print remaining spaces to fit the row's width */
+	for (i = len; i < 7; i++) {
+		ret = trace_seq_printf(s, " ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = trace_seq_printf(s, "| ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+	return TRACE_TYPE_HANDLED;
+
 }
 
 /* Signal a overhead of time execution to the output */
@@ -210,10 +312,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;
 
-	/* Must not exceed 8 characters: 9999.999 us */
-	if (duration > 10000000ULL)
-		duration = 9999999ULL;
-
 	/* Overhead */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
 		ret = print_graph_overhead(duration, s);
@@ -223,7 +321,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
 	/* Duration */
 	ret = print_graph_duration(duration, s);
-	if (!ret)
+	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Function */
@@ -288,12 +386,23 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 	struct trace_entry *ent = iter->ent;
 
 	/* Pid */
-	if (!verif_pid(s, ent->pid, cpu))
+	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Cpu */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* Proc */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+		ret = print_graph_proc(s, ent->pid);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+
+		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
@@ -313,17 +422,24 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	int ret;
 	unsigned long long duration = trace->rettime - trace->calltime;
 
-	/* Must not exceed 8 characters: xxxx.yyy us */
-	if (duration > 10000000ULL)
-		duration = 9999999ULL;
-
 	/* Pid */
-	if (!verif_pid(s, ent->pid, cpu))
+	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Cpu */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* Proc */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+		ret = print_graph_proc(s, ent->pid);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+
+		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
@@ -337,7 +453,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 
 	/* Duration */
 	ret = print_graph_duration(duration, s);
-	if (!ret)
+	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Closing brace */
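
print_graph_proc() above centers "comm-pid" in a TRACE_GRAPH_PROCINFO_LENGTH (14) column field: left padding is spaces/2, right padding is spaces - spaces/2. The same arithmetic as a stand-alone program (illustrative, not kernel code):

	#include <stdio.h>

	#define PROCINFO_LEN 14

	static void print_centered(const char *comm, int pid)
	{
		char buf[PROCINFO_LEN + 1];
		int len, spaces, left;

		/* comm is truncated to 7 chars, as in the patch */
		len = snprintf(buf, sizeof(buf), "%.7s-%d", comm, pid);
		spaces = len < PROCINFO_LEN ? PROCINFO_LEN - len : 0;
		left = spaces / 2;
		printf("[%*s%s%*s]\n", left, "", buf, spaces - left, "");
	}

	int main(void)
	{
		print_centered("bash", 2345);	/* prints "[  bash-2345   ]" */
		return 0;
	}
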
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index fde3be15c642..0b863f2cbc8e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -48,7 +48,7 @@ static inline void check_stack(void)
 	if (!object_is_on_stack(&this_size))
 		return;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 
 	/* a race could have already updated it */
@@ -78,6 +78,7 @@ static inline void check_stack(void)
 	 * on a new max, so it is far from a fast path.
 	 */
 	while (i < max_stack_trace.nr_entries) {
+		int found = 0;
 
 		stack_dump_index[i] = this_size;
 		p = start;
@@ -86,17 +87,19 @@ static inline void check_stack(void)
 			if (*p == stack_dump_trace[i]) {
 				this_size = stack_dump_index[i++] =
 					(top - p) * sizeof(unsigned long);
+				found = 1;
 				/* Start the search from here */
 				start = p + 1;
 			}
 		}
 
-		i++;
+		if (!found)
+			i++;
 	}
 
  out:
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 static void
@@ -162,11 +165,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	if (ret < 0)
 		return ret;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 	*ptr = val;
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	return count;
 }
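
The check_stack() fix above only advances the outer index when the current saved address was not matched in the frame scan; on a match, the i++ hidden inside stack_dump_index[i++] has already moved on, and an extra unconditional increment would skip an entry. The search structure in miniature (illustrative only, not the kernel's loop):

	/* Illustrative: advance i only when no match consumed it. */
	static void match_entries(unsigned long *trace, int n,
				  unsigned long *stack, int depth)
	{
		int i = 0, j = 0;

		while (i < n) {
			int found = 0;

			for (; j < depth; j++) {
				if (stack[j] == trace[i]) {
					i++;		/* consumed by the match */
					found = 1;
					j++;		/* resume search after here */
					break;
				}
			}
			if (!found)
				i++;
		}
	}
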