Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	54
1 file changed, 44 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0316ffe851bd..db5bdc8addf8 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,6 +29,7 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
+#include <asm/compat.h>
 
 static u64 perf_event_mask __read_mostly;
 
@@ -159,7 +160,7 @@ struct x86_pmu {
 					  struct perf_event *event);
 	struct event_constraint *event_constraints;
 
-	void		(*cpu_prepare)(int cpu);
+	int		(*cpu_prepare)(int cpu);
 	void		(*cpu_starting)(int cpu);
 	void		(*cpu_dying)(int cpu);
 	void		(*cpu_dead)(int cpu);
@@ -1334,11 +1335,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		if (x86_pmu.cpu_prepare)
-			x86_pmu.cpu_prepare(cpu);
+			ret = x86_pmu.cpu_prepare(cpu);
 		break;
 
 	case CPU_STARTING:
@@ -1351,6 +1353,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 			x86_pmu.cpu_dying(cpu);
 		break;
 
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
 		if (x86_pmu.cpu_dead)
 			x86_pmu.cpu_dead(cpu);
@@ -1360,7 +1363,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void __init pmu_check_apic(void)
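The notifier now propagates the callback's verdict: cpu_prepare returns an int so a PMU backend can fail CPU_UP_PREPARE, and the new CPU_UP_CANCELED case lets cpu_dead() undo a successful prepare when bring-up is aborted. A minimal sketch of a conforming callback follows; the names example_pmu_state, example_pmu_state_ptr and example_pmu_cpu_prepare are illustrative, not from this patch:

/*
 * Illustrative sketch only: a cpu_prepare implementation matching the
 * new int-returning signature.  Returning NOTIFY_BAD from CPU_UP_PREPARE
 * cancels the bring-up; the notifier then delivers CPU_UP_CANCELED, and
 * the cpu_dead() callback can release whatever was allocated here.
 */
struct example_pmu_state {
	u64	saved_config;		/* hypothetical per-CPU PMU state */
};

static DEFINE_PER_CPU(struct example_pmu_state *, example_pmu_state_ptr);

static int example_pmu_cpu_prepare(int cpu)
{
	struct example_pmu_state *st;

	st = kzalloc_node(sizeof(*st), GFP_KERNEL, cpu_to_node(cpu));
	if (!st)
		return NOTIFY_BAD;	/* veto the CPU coming online */

	per_cpu(example_pmu_state_ptr, cpu) = st;
	return NOTIFY_OK;
}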
@@ -1629,14 +1632,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return len;
 }
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+#ifdef CONFIG_COMPAT
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
-	unsigned long bytes;
+	/* 32-bit process in 64-bit kernel. */
+	struct stack_frame_ia32 frame;
+	const void __user *fp;
 
-	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+	if (!test_thread_flag(TIF_IA32))
+		return 0;
+
+	fp = compat_ptr(regs->bp);
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
+		frame.next_frame     = 0;
+		frame.return_address = 0;
+
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
+			break;
+
+		if (fp < compat_ptr(regs->sp))
+			break;
 
-	return bytes == sizeof(*frame);
+		callchain_store(entry, frame.return_address);
+		fp = compat_ptr(frame.next_frame);
+	}
+	return 1;
+}
+#else
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+	return 0;
 }
+#endif
 
 static void
 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
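The walker above treats each compat frame as two 32-bit words, which is why next_frame and return_address are zeroed as integers and converted with compat_ptr(). For reference, the layout it relies on looks like the following sketch (field names match the usage above; the authoritative definition lives in the x86 headers):

/* Sketch of the compat frame layout assumed by perf_callchain_user32():
 * the i386 calling convention saves the return address and then the old
 * %ebp, so each frame begins with the caller's frame pointer.  Both
 * fields are 32 bits wide in a compat task, hence u32 rather than
 * native pointers.
 */
struct stack_frame_ia32 {
	u32 next_frame;		/* saved %ebp: compat pointer to caller's frame */
	u32 return_address;	/* saved %eip */
};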
@@ -1652,11 +1683,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->ip);
 
+	if (perf_callchain_user32(regs, entry))
+		return;
+
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
 		frame.next_frame     = NULL;
 		frame.return_address = 0;
 
-		if (!copy_stack_frame(fp, &frame))
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
 			break;
 
 		if ((unsigned long)fp < regs->sp)
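Both the native and the compat loops implement the same frame-pointer walk: copy a (frame pointer, return address) pair from user memory, record the return address, and stop when the copy faults or the chain steps below the current stack pointer (the stack grows down, so a valid chain only moves toward higher addresses). A userspace analogue of the same technique, runnable on its own (illustrative only; build with -fno-omit-frame-pointer so the compiler maintains the frame chain):

#include <stdio.h>

/* Mirrors the kernel's struct stack_frame: saved frame pointer
 * followed by the return address. */
struct frame {
	struct frame	*next_frame;
	void		*return_address;
};

static void walk(void)
{
	struct frame *fp = __builtin_frame_address(0);
	int depth = 0;

	while (fp && depth++ < 64) {	/* cap like PERF_MAX_STACK_DEPTH */
		printf("%p\n", fp->return_address);
		/* Like the kernel's fp < regs->sp test: a frame pointer
		 * that does not move toward higher addresses means the
		 * chain is corrupt, so stop walking. */
		if (fp->next_frame <= fp)
			break;
		fp = fp->next_frame;
	}
}

int main(void)
{
	walk();
	return 0;
}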
@@ -1703,7 +1739,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 	regs->ip = ip;
@@ -1715,4 +1750,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 	regs->cs = __KERNEL_CS;
 	local_save_flags(regs->flags);
 }
-#endif
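With the #ifdef gone, perf_arch_fetch_caller_regs() is built whenever this file is, not only under CONFIG_EVENT_TRACING. The helper fills a pt_regs describing the current point of execution so other in-kernel users can synthesize samples; a hedged sketch of the kind of call site this enables (example_emit_sample is illustrative, not from this patch):

/* Illustrative only: build a pt_regs for "here" so a software event
 * can carry a meaningful IP for sampling and callchains. */
static void example_emit_sample(void)
{
	struct pt_regs regs;

	perf_arch_fetch_caller_regs(&regs, _THIS_IP_, 0);
	/* ... hand &regs to the perf core as a synthetic sample ... */
}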