Diffstat (limited to 'arch')

-rw-r--r--  arch/x86/include/asm/uaccess.h    |  3
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c  | 35
-rw-r--r--  arch/x86/lib/Makefile             |  2
-rw-r--r--  arch/x86/lib/usercopy.c           | 43
-rw-r--r--  arch/x86/oprofile/backtrace.c     | 39

5 files changed, 48 insertions(+), 74 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 99ddd148a760..36361bf6fdd1 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -555,6 +555,9 @@ struct __large_struct { unsigned long buf[100]; };
 
 #endif /* CONFIG_X86_WP_WORKS_OK */
 
+extern unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b7a010fce8c3..4ee3abf20ed6 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -22,7 +22,6 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
-#include <linux/highmem.h>
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 
@@ -67,40 +66,6 @@ enum extra_reg_type {
         EXTRA_REG_MAX           /* number of entries needed */
 };
 
-/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
- */
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
-        unsigned long offset, addr = (unsigned long)from;
-        unsigned long size, len = 0;
-        struct page *page;
-        void *map;
-        int ret;
-
-        do {
-                ret = __get_user_pages_fast(addr, 1, 0, &page);
-                if (!ret)
-                        break;
-
-                offset = addr & (PAGE_SIZE - 1);
-                size = min(PAGE_SIZE - offset, n - len);
-
-                map = kmap_atomic(page);
-                memcpy(to, map+offset, size);
-                kunmap_atomic(map);
-                put_page(page);
-
-                len += size;
-                to += size;
-                addr += size;
-
-        } while (len < n);
-
-        return len;
-}
-
 struct event_constraint {
         union {
                 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f2479f19ddde..6ba477342b8e 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 
 lib-y := delay.o
 lib-y += thunk_$(BITS).o
-lib-y += usercopy_$(BITS).o getuser.o putuser.o
+lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
 
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
new file mode 100644
index 000000000000..97be9cb54483
--- /dev/null
+++ b/arch/x86/lib/usercopy.c
@@ -0,0 +1,43 @@
+/*
+ * User address space access functions.
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+/*
+ * best effort, GUP based copy_from_user() that is NMI-safe
+ */
+unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+        unsigned long offset, addr = (unsigned long)from;
+        unsigned long size, len = 0;
+        struct page *page;
+        void *map;
+        int ret;
+
+        do {
+                ret = __get_user_pages_fast(addr, 1, 0, &page);
+                if (!ret)
+                        break;
+
+                offset = addr & (PAGE_SIZE - 1);
+                size = min(PAGE_SIZE - offset, n - len);
+
+                map = kmap_atomic(page);
+                memcpy(to, map+offset, size);
+                kunmap_atomic(map);
+                put_page(page);
+
+                len += size;
+                to += size;
+                addr += size;
+
+        } while (len < n);
+
+        return len;
+}
+EXPORT_SYMBOL_GPL(copy_from_user_nmi);
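
[Editor's note, not part of this commit: a minimal usage sketch. copy_from_user_nmi() returns the number of bytes actually copied rather than an error code, so callers in NMI/IRQ context must treat a short copy as end-of-data. The struct layout and function name below are illustrative assumptions; real callers such as perf use struct stack_frame from <asm/stacktrace.h>.]

    #include <linux/uaccess.h>

    /* Hypothetical two-word frame mirroring a saved frame-pointer pair. */
    struct user_frame {
            struct user_frame __user *next;         /* saved frame pointer  */
            unsigned long ret_addr;                 /* saved return address */
    };

    /* Returns 1 if a whole frame was copied, 0 on a short (partial) copy. */
    static int read_user_frame(struct user_frame __user *fp,
                               struct user_frame *out)
    {
            unsigned long bytes;

            /* Best effort: may copy fewer bytes, but never faults. */
            bytes = copy_from_user_nmi(out, fp, sizeof(*out));

            return bytes == sizeof(*out);
    }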
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 32f78eb46744..bff89dfe3619 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -12,10 +12,9 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/compat.h>
-#include <linux/highmem.h>
+#include <linux/uaccess.h>
 
 #include <asm/ptrace.h>
-#include <asm/uaccess.h>
 #include <asm/stacktrace.h>
 
 static int backtrace_stack(void *data, char *name)
@@ -38,42 +37,6 @@ static struct stacktrace_ops backtrace_ops = {
         .walk_stack     = print_context_stack,
 };
 
-/* from arch/x86/kernel/cpu/perf_event.c: */
-
-/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
- */
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
-        unsigned long offset, addr = (unsigned long)from;
-        unsigned long size, len = 0;
-        struct page *page;
-        void *map;
-        int ret;
-
-        do {
-                ret = __get_user_pages_fast(addr, 1, 0, &page);
-                if (!ret)
-                        break;
-
-                offset = addr & (PAGE_SIZE - 1);
-                size = min(PAGE_SIZE - offset, n - len);
-
-                map = kmap_atomic(page);
-                memcpy(to, map+offset, size);
-                kunmap_atomic(map);
-                put_page(page);
-
-                len += size;
-                to += size;
-                addr += size;
-
-        } while (len < n);
-
-        return len;
-}
-
 #ifdef CONFIG_COMPAT
 static struct stack_frame_ia32 *
 dump_user_backtrace_32(struct stack_frame_ia32 *head)
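
[Editor's note: for context, a reconstruction of the 64-bit walker in this file, which is not shown in the hunk but is the caller that now binds to the shared helper. It illustrates why the byte count matters: a short copy ends the unwind instead of faulting. Treat the body as a sketch from the surrounding code, not a quotation.]

    /* Reconstruction for illustration; see arch/x86/oprofile/backtrace.c */
    static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
    {
            /* also read one struct stack_frame beyond the current one */
            struct stack_frame bufhead[2];
            unsigned long bytes;

            bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
            if (bytes != sizeof(bufhead))
                    return NULL;

            oprofile_add_trace(bufhead[0].return_address);

            /* frame pointers must strictly progress up the stack */
            if (head >= bufhead[0].next_frame)
                    return NULL;

            return bufhead[0].next_frame;
    }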