Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c  54 ++++++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 44 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 60398a0d947c..53ea4cf1a878 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -28,6 +28,7 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
+#include <asm/compat.h>
 
 static u64 perf_event_mask __read_mostly;
 
@@ -158,7 +159,7 @@ struct x86_pmu {
 				 struct perf_event *event);
 	struct event_constraint *event_constraints;
 
-	void		(*cpu_prepare)(int cpu);
+	int		(*cpu_prepare)(int cpu);
 	void		(*cpu_starting)(int cpu);
 	void		(*cpu_dying)(int cpu);
 	void		(*cpu_dead)(int cpu);
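The callback now returns int so an implementation can refuse to bring a CPU online, e.g. when a per-CPU allocation fails. A minimal sketch (not from this patch; all names below are hypothetical) of what a failable cpu_prepare might look like:

    #include <linux/notifier.h>
    #include <linux/slab.h>
    #include <linux/topology.h>

    /* Hypothetical per-CPU PMU state; the concrete struct is PMU-specific. */
    struct example_pmu_cpuc {
            u64 saved_ctl;
    };

    static struct example_pmu_cpuc *example_cpuc[NR_CPUS];

    static int example_cpu_prepare(int cpu)
    {
            struct example_pmu_cpuc *cpuc;

            /* CPU_UP_PREPARE runs in process context, so GFP_KERNEL is safe. */
            cpuc = kzalloc_node(sizeof(*cpuc), GFP_KERNEL, cpu_to_node(cpu));
            if (!cpuc)
                    return NOTIFY_BAD;      /* veto the CPU coming online */

            example_cpuc[cpu] = cpuc;
            return NOTIFY_OK;
    }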
@@ -1333,11 +1334,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		if (x86_pmu.cpu_prepare)
-			x86_pmu.cpu_prepare(cpu);
+			ret = x86_pmu.cpu_prepare(cpu);
 		break;
 
 	case CPU_STARTING:
@@ -1350,6 +1352,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		x86_pmu.cpu_dying(cpu);
 		break;
 
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
 		if (x86_pmu.cpu_dead)
 			x86_pmu.cpu_dead(cpu);
@@ -1359,7 +1362,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void __init pmu_check_apic(void)
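With the callback's return value propagated out of the notifier, a NOTIFY_BAD from the CPU_UP_PREPARE case makes the hotplug core abort bringing the CPU online. The new CPU_UP_CANCELED case then reuses the cpu_dead callback, so whatever cpu_prepare allocated is freed again if bring-up fails partway through.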
@@ -1628,14 +1631,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return len;
 }
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+#ifdef CONFIG_COMPAT
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
-	unsigned long bytes;
+	/* 32-bit process in 64-bit kernel. */
+	struct stack_frame_ia32 frame;
+	const void __user *fp;
 
-	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+	if (!test_thread_flag(TIF_IA32))
+		return 0;
+
+	fp = compat_ptr(regs->bp);
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
+		frame.next_frame = 0;
+		frame.return_address = 0;
+
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
+			break;
+
+		if (fp < compat_ptr(regs->sp))
+			break;
 
-	return bytes == sizeof(*frame);
+		callchain_store(entry, frame.return_address);
+		fp = compat_ptr(frame.next_frame);
+	}
+	return 1;
+}
+#else
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+	return 0;
 }
+#endif
 
 static void
 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
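compat_ptr() (from the newly included <asm/compat.h>) zero-extends a 32-bit user pointer into a full __user pointer, so the walker above can chase an ia32 frame-pointer chain with the regular uaccess machinery. The frame layout it copies is not visible in this hunk; presumably struct stack_frame_ia32 mirrors the native frame with 32-bit fields, roughly:

    /* Sketch of the ia32 frame layout assumed by the walker above;
     * the real struct stack_frame_ia32 is defined elsewhere in this patch. */
    struct stack_frame_ia32 {
            u32 next_frame;         /* saved 32-bit frame pointer (EBP) */
            u32 return_address;     /* saved 32-bit return address */
    };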
@@ -1651,11 +1682,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->ip);
 
+	if (perf_callchain_user32(regs, entry))
+		return;
+
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
 		frame.next_frame = NULL;
 		frame.return_address = 0;
 
-		if (!copy_stack_frame(fp, &frame))
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
 			break;
 
 		if ((unsigned long)fp < regs->sp)
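Inlining the copy here (rather than keeping the copy_stack_frame() helper) leaves both the native and the compat walker with the same explicit bytes != sizeof(frame) check against copy_from_user_nmi()'s byte count. For contrast with the ia32 layout, the native frame this loop fills is presumably the file's existing struct stack_frame, along the lines of:

    /* Native 64-bit frame layout, inferred from the usage above
     * (next_frame is assigned NULL, so it is a pointer type). */
    struct stack_frame {
            struct stack_frame __user *next_frame;  /* saved RBP */
            unsigned long return_address;           /* saved RIP */
    };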
@@ -1702,7 +1738,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 	regs->ip = ip;
@@ -1714,4 +1749,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 	regs->cs = __KERNEL_CS;
 	local_save_flags(regs->flags);
 }
-#endif
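Dropping the CONFIG_EVENT_TRACING guard makes perf_arch_fetch_caller_regs() available to any in-kernel caller, not just the tracing code. An illustrative (not from this patch) use, assuming the caller wants a synthetic pt_regs snapshot at the current instruction; _THIS_IP_ is the generic helper from <linux/kernel.h>:

    static void example_capture(void)
    {
            struct pt_regs regs;

            /* Fill in ip/bp/sp/cs/flags for the current context;
             * skip is 0 because we want this very frame. */
            perf_arch_fetch_caller_regs(&regs, _THIS_IP_, 0);
    }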