Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup_freezer.c    | 51
-rw-r--r--  kernel/freezer.c           | 20
-rw-r--r--  kernel/power/Kconfig       |  2
-rw-r--r--  kernel/profile.c           |  2
-rw-r--r--  kernel/resource.c          |  6
-rw-r--r--  kernel/sched_debug.c       |  2
-rw-r--r--  kernel/signal.c            |  3
-rw-r--r--  kernel/trace/Kconfig       |  4
-rw-r--r--  kernel/trace/ftrace.c      |  8
-rw-r--r--  kernel/trace/ring_buffer.c | 56
-rw-r--r--  kernel/trace/trace.c       | 48
-rw-r--r--  kernel/trace/trace.h       | 20
12 files changed, 144 insertions(+), 78 deletions(-)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e9505695449..7fa476f01d0 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -162,9 +162,13 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 			      struct task_struct *task)
 {
 	struct freezer *freezer;
-	int retval;
 
-	/* Anything frozen can't move or be moved to/from */
+	/*
+	 * Anything frozen can't move or be moved to/from.
+	 *
+	 * Since orig_freezer->state == FROZEN means that @task has been
+	 * frozen, so it's sufficient to check the latter condition.
+	 */
 
 	if (is_task_frozen_enough(task))
 		return -EBUSY;
@@ -173,13 +177,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 	if (freezer->state == CGROUP_FROZEN)
 		return -EBUSY;
 
-	retval = 0;
-	task_lock(task);
-	freezer = task_freezer(task);
-	if (freezer->state == CGROUP_FROZEN)
-		retval = -EBUSY;
-	task_unlock(task);
-	return retval;
+	return 0;
 }
 
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -190,8 +188,9 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 	freezer = task_freezer(task);
 	task_unlock(task);
 
-	BUG_ON(freezer->state == CGROUP_FROZEN);
 	spin_lock_irq(&freezer->lock);
+	BUG_ON(freezer->state == CGROUP_FROZEN);
+
 	/* Locking avoids race with FREEZING -> THAWED transitions. */
 	if (freezer->state == CGROUP_FREEZING)
 		freeze_task(task, true);
@@ -276,25 +275,18 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	return num_cant_freeze_now ? -EBUSY : 0;
 }
 
-static int unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 {
 	struct cgroup_iter it;
 	struct task_struct *task;
 
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
-		int do_wake;
-
-		task_lock(task);
-		do_wake = __thaw_process(task);
-		task_unlock(task);
-		if (do_wake)
-			wake_up_process(task);
+		thaw_process(task);
 	}
 	cgroup_iter_end(cgroup, &it);
-	freezer->state = CGROUP_THAWED;
 
-	return 0;
+	freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -304,27 +296,22 @@ static int freezer_change_state(struct cgroup *cgroup,
 	int retval = 0;
 
 	freezer = cgroup_freezer(cgroup);
+
 	spin_lock_irq(&freezer->lock);
+
 	update_freezer_state(cgroup, freezer);
 	if (goal_state == freezer->state)
 		goto out;
-	switch (freezer->state) {
+
+	switch (goal_state) {
 	case CGROUP_THAWED:
-		retval = try_to_freeze_cgroup(cgroup, freezer);
+		unfreeze_cgroup(cgroup, freezer);
 		break;
-	case CGROUP_FREEZING:
-		if (goal_state == CGROUP_FROZEN) {
-			/* Userspace is retrying after
-			 * "/bin/echo FROZEN > freezer.state" returned -EBUSY */
-			retval = try_to_freeze_cgroup(cgroup, freezer);
-			break;
-		}
-		/* state == FREEZING and goal_state == THAWED, so unfreeze */
 	case CGROUP_FROZEN:
-		retval = unfreeze_cgroup(cgroup, freezer);
+		retval = try_to_freeze_cgroup(cgroup, freezer);
 		break;
 	default:
-		break;
+		BUG();
 	}
 out:
 	spin_unlock_irq(&freezer->lock);
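
The removed comment in freezer_change_state() referenced the userspace side of this interface ("/bin/echo FROZEN > freezer.state"). For orientation, a minimal, hypothetical C sketch of that interaction follows; the mount point /cgroups/frz is an assumption for illustration, not something this patch defines:

/* Hypothetical userspace sketch: drive a freezer cgroup through its
 * freezer.state file, as the removed comment describes. The path below
 * is an assumed mount point, not part of this patch. */
#include <stdio.h>

static int write_state(const char *path, const char *state)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", state);
	return fclose(f);
}

int main(void)
{
	const char *state_file = "/cgroups/frz/freezer.state";

	if (write_state(state_file, "FROZEN"))	/* may need a retry on -EBUSY */
		perror("freeze");
	if (write_state(state_file, "THAWED"))
		perror("thaw");
	return 0;
}

With the switch now keyed on goal_state, a FROZEN write that arrives while the group is still FREEZING falls straight into the try_to_freeze_cgroup() retry path, so the removed CGROUP_FREEZING special case is no longer needed.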
diff --git a/kernel/freezer.c b/kernel/freezer.c
index ba6248b323e..2f4936cf708 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -121,16 +121,7 @@ void cancel_freezing(struct task_struct *p)
 	}
 }
 
-/*
- * Wake up a frozen process
- *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails. Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
- */
-int __thaw_process(struct task_struct *p)
+static int __thaw_process(struct task_struct *p)
 {
 	if (frozen(p)) {
 		p->flags &= ~PF_FROZEN;
@@ -140,6 +131,15 @@ int __thaw_process(struct task_struct *p)
 	return 0;
 }
 
+/*
+ * Wake up a frozen process
+ *
+ * task_lock() is needed to prevent the race with refrigerator() which may
+ * occur if the freezing of tasks fails. Namely, without the lock, if the
+ * freezing of tasks failed, thaw_tasks() might have run before a task in
+ * refrigerator() could call frozen_process(), in which case the task would be
+ * frozen and no one would thaw it.
+ */
 int thaw_process(struct task_struct *p)
 {
 	task_lock(p);
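
The shape of this freezer.c change is a conventional lock-wrapper split: the unlocked helper __thaw_process() becomes static and private to freezer.c, and external callers (such as unfreeze_cgroup() above) use the locking wrapper thaw_process() instead. A rough userspace analogue of that split, with illustrative names and a pthread mutex standing in for task_lock():

/* Illustrative wrapper/helper split: a private unlocked helper plus a
 * public entry point that takes the lock, mirroring
 * __thaw_process()/thaw_process(). Not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	pthread_mutex_t lock;
	bool frozen;
};

/* Unlocked helper: caller must already hold t->lock. */
static bool do_thaw(struct task *t)
{
	if (t->frozen) {
		t->frozen = false;
		return true;		/* caller should wake the task up */
	}
	return false;
}

/* Public wrapper: takes the lock itself, like thaw_process(). */
static bool thaw(struct task *t)
{
	bool woken;

	pthread_mutex_lock(&t->lock);
	woken = do_thaw(t);
	pthread_mutex_unlock(&t->lock);
	return woken;
}

int main(void)
{
	struct task t = { PTHREAD_MUTEX_INITIALIZER, true };

	printf("woken: %d\n", thaw(&t));
	return 0;
}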
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index dcd165f92a8..23bd4daeb96 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -96,7 +96,7 @@ config SUSPEND
 
 config PM_TEST_SUSPEND
 	bool "Test suspend/resume and wakealarm during bootup"
-	depends on SUSPEND && PM_DEBUG && RTC_LIB=y
+	depends on SUSPEND && PM_DEBUG && RTC_CLASS=y
 	---help---
 	  This option will let you suspend your machine during bootup, and
 	  make it wake up a few seconds later using an RTC wakeup alarm.
diff --git a/kernel/profile.c b/kernel/profile.c
index a9e422df6bf..9830a037d8d 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -102,7 +102,7 @@ int profile_setup(char *str)
 __setup("profile=", profile_setup);
 
 
-int profile_init(void)
+int __ref profile_init(void)
 {
 	int buffer_bytes;
 	if (!prof_on)
diff --git a/kernel/resource.c b/kernel/resource.c
index 7fec0e42723..4337063663e 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -17,6 +17,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/device.h>
+#include <linux/pfn.h>
 #include <asm/io.h>
 
 
@@ -522,7 +523,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 {
 	struct resource *parent = root;
 	struct resource *conflict;
-	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
+	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
 
 	if (!res)
 		return;
@@ -849,7 +850,8 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
 			continue;
 		if (p->end < addr)
 			continue;
-		if (p->start <= addr && (p->end >= addr + size - 1))
+		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
+		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
 			continue;
 		printk(KERN_WARNING "resource map sanity check conflict: "
 		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ad958c1ec70..5ae17762ec3 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -319,7 +319,7 @@ static int __init init_sched_debug_procfs(void)
 {
 	struct proc_dir_entry *pe;
 
-	pe = proc_create("sched_debug", 0644, NULL, &sched_debug_fops);
+	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
 	if (!pe)
 		return -ENOMEM;
 	return 0;
diff --git a/kernel/signal.c b/kernel/signal.c
index 105217da5c8..4530fc65445 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1144,7 +1144,8 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
 		struct task_struct * p;
 
 		for_each_process(p) {
-			if (p->pid > 1 && !same_thread_group(p, current)) {
+			if (task_pid_vnr(p) > 1 &&
+					!same_thread_group(p, current)) {
 				int err = group_send_sig_info(sig, info, p);
 				++count;
 				if (err != -EPERM)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e0cea282e0c..33dbefd471e 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -8,7 +8,6 @@ config NOP_TRACER
 
 config HAVE_FUNCTION_TRACER
 	bool
-	select NOP_TRACER
 
 config HAVE_DYNAMIC_FTRACE
 	bool
@@ -26,8 +25,9 @@ config TRACING
 	bool
 	select DEBUG_FS
 	select RING_BUFFER
-	select STACKTRACE
+	select STACKTRACE if STACKTRACE_SUPPORT
 	select TRACEPOINTS
+	select NOP_TRACER
 
 menu "Tracers"
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7618c528756..4a39d24568c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1339,6 +1339,14 @@ void __init ftrace_init(void)
 }
 
 #else
+
+static int __init ftrace_nodyn_init(void)
+{
+	ftrace_enabled = 1;
+	return 0;
+}
+device_initcall(ftrace_nodyn_init);
+
 # define ftrace_startup()		do { } while (0)
 # define ftrace_shutdown()		do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cedf4e26828..3f338063864 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1022,8 +1022,23 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	struct ring_buffer_event *event;
 	u64 ts, delta;
 	int commit = 0;
+	int nr_loops = 0;
 
  again:
+	/*
+	 * We allow for interrupts to reenter here and do a trace.
+	 * If one does, it will cause this original code to loop
+	 * back here. Even with heavy interrupts happening, this
+	 * should only happen a few times in a row. If this happens
+	 * 1000 times in a row, there must be either an interrupt
+	 * storm or we have something buggy.
+	 * Bail!
+	 */
+	if (unlikely(++nr_loops > 1000)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
 	/*
@@ -1532,10 +1547,23 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct buffer_page *reader = NULL;
 	unsigned long flags;
+	int nr_loops = 0;
 
 	spin_lock_irqsave(&cpu_buffer->lock, flags);
 
  again:
+	/*
+	 * This should normally only loop twice. But because the
+	 * start of the reader inserts an empty page, it causes
+	 * a case where we will loop three times. There should be no
+	 * reason to loop four times (that I know of).
+	 */
+	if (unlikely(++nr_loops > 3)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		reader = NULL;
+		goto out;
+	}
+
 	reader = cpu_buffer->reader_page;
 
 	/* If there's more to read, return this page */
@@ -1665,6 +1693,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
 	struct buffer_page *reader;
+	int nr_loops = 0;
 
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return NULL;
@@ -1672,6 +1701,19 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	cpu_buffer = buffer->buffers[cpu];
 
  again:
+	/*
+	 * We repeat when a timestamp is encountered. It is possible
+	 * to get multiple timestamps from an interrupt entering just
+	 * as one timestamp is about to be written. The max times
+	 * that this can happen is the number of nested interrupts we
+	 * can have. Nesting 10 deep of interrupts is clearly
+	 * an anomaly.
+	 */
+	if (unlikely(++nr_loops > 10)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
 		return NULL;
@@ -1722,6 +1764,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer *buffer;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
+	int nr_loops = 0;
 
 	if (ring_buffer_iter_empty(iter))
 		return NULL;
@@ -1730,6 +1773,19 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	buffer = cpu_buffer->buffer;
 
  again:
+	/*
+	 * We repeat when a timestamp is encountered. It is possible
+	 * to get multiple timestamps from an interrupt entering just
+	 * as one timestamp is about to be written. The max times
+	 * that this can happen is the number of nested interrupts we
+	 * can have. Nesting 10 deep of interrupts is clearly
+	 * an anomaly.
+	 */
+	if (unlikely(++nr_loops > 10)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
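
All four ring_buffer.c hunks above apply the same defensive pattern: count the passes through the again: label and bail out with RB_WARN_ON() once the count exceeds what legitimate interrupt nesting could explain. A standalone sketch of that bounded-retry shape, with illustrative names only (not kernel APIs):

/* Bounded-retry sketch mirroring the nr_loops guards added above: retry
 * a step that concurrent activity can disturb, but give up loudly
 * instead of looping forever when something is badly wrong. */
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the racy step (e.g. grabbing the reader page just as an
 * interrupt swaps it); here it succeeds after a few disturbed attempts. */
static int step_succeeded(int attempt)
{
	return attempt >= 3;
}

static int do_bounded_retry(void)
{
	int nr_loops = 0;

 again:
	/* A handful of retries is normal; far more means a storm or a bug. */
	if (++nr_loops > 10) {
		fprintf(stderr, "retry limit exceeded, bailing out\n");
		return -1;
	}
	if (!step_succeeded(nr_loops))
		goto again;
	return 0;
}

int main(void)
{
	return do_bounded_retry() ? EXIT_FAILURE : EXIT_SUCCESS;
}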
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a610ca77155..9f3b478f917 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -656,7 +656,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 	entry->preempt_count	= pc & 0xff;
 	entry->pid		= (tsk) ? tsk->pid : 0;
 	entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+#else
+		TRACE_FLAG_IRQS_NOSUPPORT |
+#endif
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -701,6 +705,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
 			       unsigned long flags,
 			       int skip, int pc)
 {
+#ifdef CONFIG_STACKTRACE
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
@@ -726,6 +731,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
 
 	save_stack_trace(&trace);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
 }
 
 void __trace_stack(struct trace_array *tr,
@@ -1082,17 +1088,20 @@ static void s_stop(struct seq_file *m, void *p)
 	mutex_unlock(&trace_types_lock);
 }
 
-#define KRETPROBE_MSG "[unknown/kretprobe'd]"
-
 #ifdef CONFIG_KRETPROBES
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-	return addr == (unsigned long)kretprobe_trampoline;
+	static const char tramp_name[] = "kretprobe_trampoline";
+	int size = sizeof(tramp_name);
+
+	if (strncmp(tramp_name, name, size) == 0)
+		return "[unknown/kretprobe'd]";
+	return name;
 }
 #else
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-	return 0;
+	return name;
 }
 #endif /* CONFIG_KRETPROBES */
 
@@ -1101,10 +1110,13 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
 {
 #ifdef CONFIG_KALLSYMS
 	char str[KSYM_SYMBOL_LEN];
+	const char *name;
 
 	kallsyms_lookup(address, NULL, NULL, NULL, str);
 
-	return trace_seq_printf(s, fmt, str);
+	name = kretprobed(str);
+
+	return trace_seq_printf(s, fmt, name);
 #endif
 	return 1;
 }
@@ -1115,9 +1127,12 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 {
 #ifdef CONFIG_KALLSYMS
 	char str[KSYM_SYMBOL_LEN];
+	const char *name;
 
 	sprint_symbol(str, address);
-	return trace_seq_printf(s, fmt, str);
+	name = kretprobed(str);
+
+	return trace_seq_printf(s, fmt, name);
 #endif
 	return 1;
 }
@@ -1244,7 +1259,8 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
 	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
 	trace_seq_printf(s, "%3d", cpu);
 	trace_seq_printf(s, "%c%c",
-			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
+			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+			 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
 			((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
 
 	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
@@ -1370,10 +1386,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 
 		seq_print_ip_sym(s, field->ip, sym_flags);
 		trace_seq_puts(s, " (");
-		if (kretprobed(field->parent_ip))
-			trace_seq_puts(s, KRETPROBE_MSG);
-		else
-			seq_print_ip_sym(s, field->parent_ip, sym_flags);
+		seq_print_ip_sym(s, field->parent_ip, sym_flags);
 		trace_seq_puts(s, ")\n");
 		break;
 	}
@@ -1489,12 +1502,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 			ret = trace_seq_printf(s, " <-");
 			if (!ret)
 				return TRACE_TYPE_PARTIAL_LINE;
-			if (kretprobed(field->parent_ip))
-				ret = trace_seq_puts(s, KRETPROBE_MSG);
-			else
-				ret = seq_print_ip_sym(s,
-						       field->parent_ip,
-						       sym_flags);
+			ret = seq_print_ip_sym(s,
+					       field->parent_ip,
+					       sym_flags);
 			if (!ret)
 				return TRACE_TYPE_PARTIAL_LINE;
 		}
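
The printing paths above no longer compare the parent instruction pointer against the kretprobe trampoline's address; instead kretprobed() substitutes the marker string after the address has already been resolved to a symbol name. A small userspace sketch of that substitution with a usage example (the sample symbol strings are made up):

/* Userspace sketch of the name-based substitution used by the new
 * kretprobed(): once an address is resolved to a symbol name, replace
 * the trampoline's name with a readable marker. */
#include <stdio.h>
#include <string.h>

static const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";

	if (strncmp(tramp_name, name, sizeof(tramp_name)) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}

int main(void)
{
	printf("%s\n", kretprobed("schedule+0x10/0x7c0"));	/* printed as-is */
	printf("%s\n", kretprobed("kretprobe_trampoline"));	/* substituted */
	return 0;
}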
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6889ca48f1f..8465ad05270 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -120,18 +120,20 @@ struct trace_boot {
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
  *  IRQS_OFF		- interrupts were disabled
- *  NEED_RESCED	- reschedule is requested
- *  HARDIRQ		- inside an interrupt handler
- *  SOFTIRQ		- inside a softirq handler
- *  CONT		- multiple entries hold the trace item
+ *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
+ *  NEED_RESCED	- reschedule is requested
+ *  HARDIRQ		- inside an interrupt handler
+ *  SOFTIRQ		- inside a softirq handler
+ *  CONT		- multiple entries hold the trace item
  */
 enum trace_flag_type {
 	TRACE_FLAG_IRQS_OFF		= 0x01,
-	TRACE_FLAG_NEED_RESCHED		= 0x02,
-	TRACE_FLAG_HARDIRQ		= 0x04,
-	TRACE_FLAG_SOFTIRQ		= 0x08,
-	TRACE_FLAG_CONT			= 0x10,
+	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
+	TRACE_FLAG_NEED_RESCHED		= 0x04,
+	TRACE_FLAG_HARDIRQ		= 0x08,
+	TRACE_FLAG_SOFTIRQ		= 0x10,
+	TRACE_FLAG_CONT			= 0x20,
 };
 
 #define TRACE_BUF_SIZE		1024