author    Ingo Molnar <mingo@elte.hu>  2010-11-26 09:07:02 -0500
committer Ingo Molnar <mingo@elte.hu>  2010-11-26 09:07:02 -0500
commit    6c869e772c72d509d0db243a56c205ef48a29baf (patch)
tree      9a290f1742526a8816f94560cb09bc0a09c910de
parent    e4e91ac410356da3a518188f371e9d3b52ee38ee (diff)
parent    ee6dcfa40a50fe12a3ae0fb4d2653c66c3ed6556 (diff)
Merge branch 'perf/urgent' into perf/core
Conflicts:
	arch/x86/kernel/apic/hw_nmi.c

Merge reason: Resolve conflict, queue up dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/Kconfig                    2
-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c       7
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c   20
-rw-r--r--  arch/x86/kernel/entry_64.S          2
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c     4
-rw-r--r--  include/linux/hw_breakpoint.h       4
-rw-r--r--  include/linux/perf_event.h         30
-rw-r--r--  kernel/hw_breakpoint.c              3
-rw-r--r--  kernel/irq_work.c                   4
-rw-r--r--  kernel/perf_event.c                93
-rw-r--r--  tools/perf/builtin-record.c        17
-rw-r--r--  tools/perf/util/symbol.c            4
12 files changed, 144 insertions, 46 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e8327686d3c5..e330da21b84f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,7 +21,7 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
-	select HAVE_PERF_EVENTS if (!M386 && !M486)
+	select HAVE_PERF_EVENTS
 	select HAVE_IRQ_WORK
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 3e25afe9a62a..a0e71cb4fa9c 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -17,9 +17,6 @@
 #include <linux/nmi.h>
 #include <linux/module.h>
 
-/* For reliability, we're prepared to waste bits here. */
-static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
-
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 u64 hw_nmi_get_sample_period(void)
 {
@@ -27,6 +24,10 @@ u64 hw_nmi_get_sample_period(void)
 }
 #endif
 
+
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+
 #ifdef arch_trigger_all_cpu_backtrace
 void arch_trigger_all_cpu_backtrace(void)
 {
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5273c7b90b8b..7c1a4c35fd41 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -372,6 +372,20 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
+static bool check_hw_exists(void)
+{
+	u64 val, val_new = 0;
+	int ret = 0;
+
+	val = 0xabcdUL;
+	ret |= checking_wrmsrl(x86_pmu.perfctr, val);
+	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+	if (ret || val != val_new)
+		return false;
+
+	return true;
+}
+
 static void reserve_ds_buffers(void);
 static void release_ds_buffers(void);
 
@@ -1363,6 +1377,12 @@ void __init init_hw_perf_events(void)
 
 	pmu_check_apic();
 
+	/* sanity check that the hardware exists or is emulated */
+	if (!check_hw_exists()) {
+		pr_cont("Broken PMU hardware detected, software events only.\n");
+		return;
+	}
+
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
 	if (x86_pmu.quirks)
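
The new check_hw_exists() probes the first counter MSR by writing a sentinel and reading it back; if either access faults or the value does not round-trip, init_hw_perf_events() bails out and leaves only software events. A minimal user-space sketch of the same write/read-back probe, with a plain variable standing in for the MSR and hypothetical reg_write()/reg_read() helpers in place of checking_wrmsrl()/rdmsrl_safe():

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for a hardware counter register; in the kernel this is an MSR. */
	static uint64_t fake_counter_reg;

	/* Illustrative accessors, returning 0 on success like the kernel's safe MSR helpers. */
	static int reg_write(uint64_t val)  { fake_counter_reg = val; return 0; }
	static int reg_read(uint64_t *val)  { *val = fake_counter_reg; return 0; }

	/* Write a sentinel, read it back, and only trust the hardware if it round-trips. */
	static bool check_hw_exists(void)
	{
		uint64_t val = 0xabcdUL, val_new = 0;
		int ret = 0;

		ret |= reg_write(val);
		ret |= reg_read(&val_new);

		return !ret && val == val_new;
	}

	int main(void)
	{
		printf("PMU %s\n", check_hw_exists() ? "present" : "broken or missing");
		return 0;
	}
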
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index fe2690d71c0c..e3ba417e8697 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -295,6 +295,7 @@ ENDPROC(native_usergs_sysret64)
 	.endm
 
 /* save partial stack frame */
+	.pushsection .kprobes.text, "ax"
 ENTRY(save_args)
 	XCPT_FRAME
 	cld
@@ -334,6 +335,7 @@ ENTRY(save_args)
 	ret
 	CFI_ENDPROC
 END(save_args)
+	.popsection
 
 ENTRY(save_rest)
 	PARTIAL_FRAME 1 REST_SKIP+8
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index ff15c9dcc25d..42c594254507 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 	dr6_p = (unsigned long *)ERR_PTR(args->err);
 	dr6 = *dr6_p;
 
+	/* If it's a single step, TRAP bits are random */
+	if (dr6 & DR_STEP)
+		return NOTIFY_DONE;
+
 	/* Do an early return if no trap bits are set in DR6 */
 	if ((dr6 & DR_TRAP_BITS) == 0)
 		return NOTIFY_DONE;
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index a2d6ea49ec56..d1e55fed2c7d 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -33,6 +33,8 @@ enum bp_type_idx {
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 
+extern int __init init_hw_breakpoint(void);
+
 static inline void hw_breakpoint_init(struct perf_event_attr *attr)
 {
 	memset(attr, 0, sizeof(*attr));
@@ -108,6 +110,8 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
 
 #else /* !CONFIG_HAVE_HW_BREAKPOINT */
 
+static inline int __init init_hw_breakpoint(void) { return 0; }
+
 static inline struct perf_event *
 register_user_hw_breakpoint(struct perf_event_attr *attr,
 			    perf_overflow_handler_t triggered,
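
The header pairs the real init_hw_breakpoint() declaration under CONFIG_HAVE_HW_BREAKPOINT with a static inline stub in the #else branch, so perf_event_init() can call it unconditionally. A compressed, stand-alone sketch of that conditional-stub idiom; HAVE_FEATURE and init_feature() are placeholder names, not kernel symbols:

	#include <stdio.h>

	/* #define HAVE_FEATURE 1   -- define this when the feature is compiled in */

	#ifdef HAVE_FEATURE
	int init_feature(void);                              /* real implementation lives elsewhere */
	#else
	static inline int init_feature(void) { return 0; }   /* stub: feature absent, always "succeeds" */
	#endif

	int main(void)
	{
		/* The call site stays identical whether or not the feature exists. */
		int ret = init_feature();
		if (ret)
			fprintf(stderr, "feature init failed: %d\n", ret);
		return 0;
	}
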
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 40150f345982..de2c41758e29 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -850,6 +850,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
 
@@ -908,20 +909,6 @@ extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
 extern void __perf_event_task_sched_in(struct task_struct *task);
 extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-
-extern atomic_t perf_task_events;
-
-static inline void perf_event_task_sched_in(struct task_struct *task)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
-}
-
-static inline
-void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
-}
-
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1030,6 +1017,21 @@ have_event:
 	__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
+
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
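
Moving the two scheduler hooks below perf_sw_event() lets perf_event_task_sched_out() emit the context-switch software event before dispatching to the slow path, and both hooks stay cheap because COND_STMT() only branches to the out-of-line functions when perf_task_events is non-zero. A rough stand-alone model of that counter-gated hook, using a C11 atomic load where the kernel uses atomic_t plus jump labels; names are illustrative:

	#include <stdatomic.h>
	#include <stdio.h>

	/* How many users currently want the expensive callback (cf. perf_task_events). */
	static atomic_int task_events = 0;

	static void __task_sched_out(void) { puts("slow path: save per-task state"); }

	/* Cheap fast path: a single relaxed load when nobody is listening. */
	static void task_sched_out(void)
	{
		if (atomic_load_explicit(&task_events, memory_order_relaxed))
			__task_sched_out();
	}

	int main(void)
	{
		task_sched_out();                  /* no listeners: nothing happens   */
		atomic_fetch_add(&task_events, 1);
		task_sched_out();                  /* listener registered: slow path  */
		return 0;
	}
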
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 2c9120f0afca..e5325825aeb6 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -620,7 +620,7 @@ static struct pmu perf_breakpoint = {
 	.read		= hw_breakpoint_pmu_read,
 };
 
-static int __init init_hw_breakpoint(void)
+int __init init_hw_breakpoint(void)
 {
 	unsigned int **task_bp_pinned;
 	int cpu, err_cpu;
@@ -655,6 +655,5 @@ static int __init init_hw_breakpoint(void)
 
 	return -ENOMEM;
 }
-core_initcall(init_hw_breakpoint);
 
 
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index f16763ff8481..90f881904bb1 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -145,7 +145,9 @@ void irq_work_run(void)
 	 * Clear the BUSY bit and return to the free state if
 	 * no-one else claimed it meanwhile.
 	 */
-	cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL);
+	(void)cmpxchg(&entry->next,
+		      next_flags(NULL, IRQ_WORK_BUSY),
+		      NULL);
 	}
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
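
The irq_work hunk only adds a (void) cast to silence the unused-result warning; the underlying idiom is "drop the BUSY flag only if nobody re-claimed the entry in the meantime". A small user-space sketch of that compare-and-exchange pattern with C11 atomics; the flag value and names are illustrative:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define IRQ_WORK_BUSY 0x1u   /* illustrative flag bit */

	static atomic_uintptr_t entry_next;

	/* Return to the free state (0) only if the entry is still just BUSY;
	 * if another CPU re-claimed it meanwhile, leave their value alone. */
	static void clear_busy(void)
	{
		uintptr_t expected = IRQ_WORK_BUSY;

		(void)atomic_compare_exchange_strong(&entry_next, &expected, 0);
	}

	int main(void)
	{
		atomic_store(&entry_next, IRQ_WORK_BUSY);
		clear_busy();
		printf("entry state: %#lx\n", (unsigned long)atomic_load(&entry_next));
		return 0;
	}
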
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 40c3aab648a1..43f757ccf831 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -1286,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 {
 	int ctxn;
 
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
 	for_each_task_context_nr(ctxn)
 		perf_event_context_sched_out(task, ctxn, next);
 }
@@ -1621,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
 	raw_spin_lock(&ctx->lock);
 
-	/* Rotate the first entry last of non-pinned groups */
-	list_rotate_left(&ctx->flexible_groups);
+	/*
+	 * Rotate the first entry last of non-pinned groups. Rotation might be
+	 * disabled by the inheritance code.
+	 */
+	if (!ctx->rotate_disable)
+		list_rotate_left(&ctx->flexible_groups);
 
 	raw_spin_unlock(&ctx->lock);
 }
@@ -2234,11 +2237,6 @@ int perf_event_release_kernel(struct perf_event *event)
 	raw_spin_unlock_irq(&ctx->lock);
 	mutex_unlock(&ctx->mutex);
 
-	mutex_lock(&event->owner->perf_event_mutex);
-	list_del_init(&event->owner_entry);
-	mutex_unlock(&event->owner->perf_event_mutex);
-	put_task_struct(event->owner);
-
 	free_event(event);
 
 	return 0;
@@ -2251,9 +2249,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
 	struct perf_event *event = file->private_data;
+	struct task_struct *owner;
 
 	file->private_data = NULL;
 
+	rcu_read_lock();
+	owner = ACCESS_ONCE(event->owner);
+	/*
+	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+	 * !owner it means the list deletion is complete and we can indeed
+	 * free this event, otherwise we need to serialize on
+	 * owner->perf_event_mutex.
+	 */
+	smp_read_barrier_depends();
+	if (owner) {
+		/*
+		 * Since delayed_put_task_struct() also drops the last
+		 * task reference we can safely take a new reference
+		 * while holding the rcu_read_lock().
+		 */
+		get_task_struct(owner);
+	}
+	rcu_read_unlock();
+
+	if (owner) {
+		mutex_lock(&owner->perf_event_mutex);
+		/*
+		 * We have to re-check the event->owner field, if it is cleared
+		 * we raced with perf_event_exit_task(), acquiring the mutex
+		 * ensured they're done, and we can proceed with freeing the
+		 * event.
+		 */
+		if (event->owner)
+			list_del_init(&event->owner_entry);
+		mutex_unlock(&owner->perf_event_mutex);
+		put_task_struct(owner);
+	}
+
 	return perf_event_release_kernel(event);
 }
 
@@ -5668,7 +5700,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	mutex_unlock(&ctx->mutex);
 
 	event->owner = current;
-	get_task_struct(current);
+
 	mutex_lock(&current->perf_event_mutex);
 	list_add_tail(&event->owner_entry, &current->perf_event_list);
 	mutex_unlock(&current->perf_event_mutex);
@@ -5736,12 +5768,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	++ctx->generation;
 	mutex_unlock(&ctx->mutex);
 
-	event->owner = current;
-	get_task_struct(current);
-	mutex_lock(&current->perf_event_mutex);
-	list_add_tail(&event->owner_entry, &current->perf_event_list);
-	mutex_unlock(&current->perf_event_mutex);
-
 	return event;
 
 err_free:
@@ -5892,8 +5918,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+	struct perf_event *event, *tmp;
 	int ctxn;
 
+	mutex_lock(&child->perf_event_mutex);
+	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+				 owner_entry) {
+		list_del_init(&event->owner_entry);
+
+		/*
+		 * Ensure the list deletion is visible before we clear
+		 * the owner, closes a race against perf_release() where
+		 * we need to serialize on the owner->perf_event_mutex.
+		 */
+		smp_wmb();
+		event->owner = NULL;
+	}
+	mutex_unlock(&child->perf_event_mutex);
+
 	for_each_task_context_nr(ctxn)
 		perf_event_exit_task_context(child, ctxn);
 }
@@ -6113,6 +6155,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	struct perf_event *event;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	unsigned long flags;
 	int ret = 0;
 
 	child->perf_event_ctxp[ctxn] = NULL;
@@ -6153,6 +6196,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	/*
+	 * We can't hold ctx->lock when iterating the ->flexible_group list due
+	 * to allocations, but we need to prevent rotation because
+	 * rotate_ctx() will change the list from interrupt context.
+	 */
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 1;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
@@ -6160,6 +6212,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 0;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	child_ctx = child->perf_event_ctxp[ctxn];
 
 	if (child_ctx && inherited_all) {
@@ -6312,6 +6368,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+	int ret;
+
 	perf_event_init_all_cpus();
 	init_srcu_struct(&pmus_srcu);
 	perf_pmu_register(&perf_swevent);
@@ -6319,4 +6377,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_task_clock);
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
+
+	ret = init_hw_breakpoint();
+	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
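
The core of the kernel/perf_event.c changes is an ownership handshake: the exiting task unlinks each event from its list and only then clears event->owner behind smp_wmb(), while perf_release() pins the owner under RCU and re-checks event->owner under owner->perf_event_mutex before touching the list. A deliberately simplified, single-threaded model of that handshake (a pthread mutex for perf_event_mutex, a plain counter for the task reference, no real RCU) assumed here for illustration only, not the kernel implementation:

	#include <pthread.h>
	#include <stddef.h>
	#include <stdio.h>

	struct task  { pthread_mutex_t lock; int refs; };
	struct event { struct task *owner; int on_list; };

	/* Exit path (cf. perf_event_exit_task): unlink under the mutex, then clear owner.
	 * In the kernel an smp_wmb() orders the unlink before the owner store. */
	static void exit_task(struct task *t, struct event *e)
	{
		pthread_mutex_lock(&t->lock);
		e->on_list = 0;           /* list_del_init(&event->owner_entry) */
		__sync_synchronize();     /* stands in for smp_wmb()            */
		e->owner = NULL;
		pthread_mutex_unlock(&t->lock);
	}

	/* Release path (cf. perf_release): pin the owner, then re-check under its mutex. */
	static void release_event(struct event *e)
	{
		struct task *owner = e->owner;   /* kernel: ACCESS_ONCE() under rcu_read_lock() */

		if (owner) {
			owner->refs++;           /* get_task_struct(owner) */
			pthread_mutex_lock(&owner->lock);
			if (e->owner)            /* raced with exit_task()? it already unlinked */
				e->on_list = 0;
			pthread_mutex_unlock(&owner->lock);
			owner->refs--;           /* put_task_struct(owner) */
		}
		printf("event freed, still on list: %d\n", e->on_list);
	}

	int main(void)
	{
		struct task  t = { PTHREAD_MUTEX_INITIALIZER, 1 };
		struct event e = { &t, 1 };

		release_event(&e);   /* normal close: owner still alive      */
		exit_task(&t, &e);
		release_event(&e);   /* close after exit: owner already gone */
		return 0;
	}
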
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index d9dd47885218..3d2cb4899807 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -697,17 +697,18 @@ static int __cmd_record(int argc, const char **argv)
 	if (err < 0)
 		err = event__synthesize_kernel_mmap(process_synthesized_event,
 						    session, machine, "_stext");
-	if (err < 0) {
-		pr_err("Couldn't record kernel reference relocation symbol.\n");
-		return err;
-	}
+	if (err < 0)
+		pr_err("Couldn't record kernel reference relocation symbol\n"
+		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+		       "Check /proc/kallsyms permission or run as root.\n");
 
 	err = event__synthesize_modules(process_synthesized_event,
 					session, machine);
-	if (err < 0) {
-		pr_err("Couldn't record kernel reference relocation symbol.\n");
-		return err;
-	}
+	if (err < 0)
+		pr_err("Couldn't record kernel module information.\n"
+		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+		       "Check /proc/modules permission or run as root.\n");
+
 	if (perf_guest)
 		perf_session__process_machines(session, event__synthesize_guest_os);
 
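
The builtin-record.c change stops treating a failed kernel-map or module synthesis as fatal: instead of returning, it prints a multi-line hint built from adjacent C string literals, which the compiler concatenates into one format string. A tiny illustration of that style, with fprintf() standing in for perf's pr_err():

	#include <stdio.h>

	int main(void)
	{
		int err = -1;   /* pretend kernel-map synthesis failed */

		if (err < 0)
			fprintf(stderr,
				"Couldn't record kernel reference relocation symbol\n"
				"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				"Check /proc/kallsyms permission or run as root.\n");

		/* Execution continues: the failure degrades output quality but no longer aborts. */
		return 0;
	}
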
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b39f499e575a..0500895a45af 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -295,7 +295,9 @@ static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
 {
 	struct rb_node **p = &self->rb_node;
 	struct rb_node *parent = NULL;
-	struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;
+	struct symbol_name_rb_node *symn, *s;
+
+	symn = container_of(sym, struct symbol_name_rb_node, sym);
 
 	while (*p != NULL) {
 		parent = *p;
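
The symbol.c fix replaces pointer arithmetic that subtracted the size of the wrong type with container_of(), which recovers the enclosing structure from a pointer to an embedded member via offsetof(). A stand-alone sketch of the same idiom with a locally defined container_of(); the struct layout below is illustrative, not perf's:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct symbol { unsigned long start, end; };

	struct symbol_name_rb_node {
		int rb_placeholder;     /* stands in for the embedded struct rb_node */
		struct symbol sym;      /* member we get handed a pointer to         */
	};

	int main(void)
	{
		struct symbol_name_rb_node node = { 0, { 0x1000, 0x2000 } };
		struct symbol *sym = &node.sym;

		/* Recover the enclosing node from the embedded symbol pointer. */
		struct symbol_name_rb_node *symn =
			container_of(sym, struct symbol_name_rb_node, sym);

		printf("recovered node == original: %d\n", symn == &node);
		return 0;
	}
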