Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
 arch/x86/kernel/cpu/perf_event.c | 70 +++++++++++++++++++++++++++++++++++-----------------------------------
 1 file changed, 35 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1badff6b6b28..5cb4e8dcee4b 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,6 +29,41 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 
+/*
+ * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
+ */
+static unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+	unsigned long offset, addr = (unsigned long)from;
+	int type = in_nmi() ? KM_NMI : KM_IRQ0;
+	unsigned long size, len = 0;
+	struct page *page;
+	void *map;
+	int ret;
+
+	do {
+		ret = __get_user_pages_fast(addr, 1, 0, &page);
+		if (!ret)
+			break;
+
+		offset = addr & (PAGE_SIZE - 1);
+		size = min(PAGE_SIZE - offset, n - len);
+
+		map = kmap_atomic(page, type);
+		memcpy(to, map+offset, size);
+		kunmap_atomic(map, type);
+		put_page(page);
+
+		len += size;
+		to += size;
+		addr += size;
+
+	} while (len < n);
+
+	return len;
+}
+
 static u64 perf_event_mask __read_mostly;
 
 struct event_constraint {
@@ -1550,41 +1585,6 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
 
-/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
- */
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long offset, addr = (unsigned long)from;
-	int type = in_nmi() ? KM_NMI : KM_IRQ0;
-	unsigned long size, len = 0;
-	struct page *page;
-	void *map;
-	int ret;
-
-	do {
-		ret = __get_user_pages_fast(addr, 1, 0, &page);
-		if (!ret)
-			break;
-
-		offset = addr & (PAGE_SIZE - 1);
-		size = min(PAGE_SIZE - offset, n - len);
-
-		map = kmap_atomic(page, type);
-		memcpy(to, map+offset, size);
-		kunmap_atomic(map, type);
-		put_page(page);
-
-		len += size;
-		to += size;
-		addr += size;
-
-	} while (len < n);
-
-	return len;
-}
-
 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
 {
 	unsigned long bytes;
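
For readers unfamiliar with this helper, here is a sketch of how it is consumed. The copy_stack_frame() function, whose opening lines appear in the context above, reads one user-space stack frame during callchain sampling; the regular copy_from_user() is unsafe here because the PMU interrupt may arrive in NMI context, where a page fault on the user address could end up sleeping. The struct layout and function body below are a reconstruction from kernels of this vintage, not part of the patch itself:

	/* user-mode stack frame as laid down by the frame-pointer convention */
	struct stack_frame {
		const void __user	*next_fp;	/* saved frame pointer */
		unsigned long		return_address;	/* saved return address */
	};

	static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
	{
		unsigned long bytes;

		/*
		 * copy_from_user_nmi() returns the number of bytes it managed
		 * to copy; anything short of a full frame ends the walk.
		 */
		bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

		return bytes == sizeof(*frame);
	}

Note the do/while loop in copy_from_user_nmi() itself: because a requested range may straddle a page boundary, each iteration pins at most one page with __get_user_pages_fast(), maps it with kmap_atomic() (choosing KM_NMI or KM_IRQ0 so atomic kmap slots are not shared across interrupt levels), and copies only up to the end of that page before advancing.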