author		Ingo Molnar <mingo@elte.hu>	2008-07-09 09:00:48 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-09 09:00:48 -0400
commit		de989ef093623ab5259f885f30be699c431d4006 (patch)
tree		8896b217f6f4ce6cd2d47b03b907ae0a8f4f4321
parent		a737abd11ac4eb9f4226fa8c9f1d9b5be12a96c1 (diff)
parent		22cac1670786108ccd4caa0656c39fa4ba69fa7d (diff)
Merge branch 'x86/unify-lib' into x86/core
-rw-r--r--	arch/x86/Kconfig.cpu	2
-rw-r--r--	arch/x86/ia32/ia32entry.S	25
-rw-r--r--	arch/x86/kernel/asm-offsets_64.c	2
-rw-r--r--	arch/x86/kernel/entry_64.S	23
-rw-r--r--	arch/x86/kernel/tsc.c	1
-rw-r--r--	arch/x86/lib/Makefile	4
-rw-r--r--	arch/x86/lib/copy_user_64.S	4
-rw-r--r--	arch/x86/lib/delay.c (renamed from arch/x86/lib/delay_32.c)	17
-rw-r--r--	arch/x86/lib/delay_64.c	85
-rw-r--r--	arch/x86/lib/getuser.S (renamed from arch/x86/lib/getuser_64.S)	87
-rw-r--r--	arch/x86/lib/getuser_32.S	78
-rw-r--r--	arch/x86/lib/putuser.S (renamed from arch/x86/lib/putuser_32.S)	73
-rw-r--r--	arch/x86/lib/putuser_64.S	106
-rw-r--r--	include/asm-x86/asm.h	9
-rw-r--r--	include/asm-x86/delay.h	4
-rw-r--r--	include/asm-x86/uaccess.h	448
-rw-r--r--	include/asm-x86/uaccess_32.h	422
-rw-r--r--	include/asm-x86/uaccess_64.h	260
18 files changed, 574 insertions, 1076 deletions
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 3d22bb8175b4..abff1b84ed5b 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -344,7 +344,7 @@ config X86_F00F_BUG
 
 config X86_WP_WORKS_OK
 	def_bool y
-	depends on X86_32 && !M386
+	depends on !M386
 
 config X86_INVLPG
 	def_bool y
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 24e4d4928d65..20371d0635e4 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -116,7 +116,7 @@ ENTRY(ia32_sysenter_target)
 	pushfq
 	CFI_ADJUST_CFA_OFFSET 8
 	/*CFI_REL_OFFSET rflags,0*/
-	movl	8*3-THREAD_SIZE+threadinfo_sysenter_return(%rsp), %r10d
+	movl	8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
 	CFI_REGISTER rip,r10
 	pushq	$__USER32_CS
 	CFI_ADJUST_CFA_OFFSET 8
@@ -136,8 +136,9 @@ ENTRY(ia32_sysenter_target)
 	.quad 1b,ia32_badarg
 	.previous
 	GET_THREAD_INFO(%r10)
-	orl    $TS_COMPAT,threadinfo_status(%r10)
-	testl  $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
+	orl    $TS_COMPAT,TI_status(%r10)
+	testl  $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
+		TI_flags(%r10)
 	CFI_REMEMBER_STATE
 	jnz  sysenter_tracesys
 sysenter_do_call:
@@ -149,9 +150,9 @@ sysenter_do_call:
 	GET_THREAD_INFO(%r10)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
+	testl	$_TIF_ALLWORK_MASK,TI_flags(%r10)
 	jnz	int_ret_from_sys_call
-	andl    $~TS_COMPAT,threadinfo_status(%r10)
+	andl    $~TS_COMPAT,TI_status(%r10)
 	/* clear IF, that popfq doesn't enable interrupts early */
 	andl  $~0x200,EFLAGS-R11(%rsp)
 	movl	RIP-R11(%rsp),%edx		/* User %eip */
@@ -240,8 +241,9 @@ ENTRY(ia32_cstar_target)
 	.quad 1b,ia32_badarg
 	.previous
 	GET_THREAD_INFO(%r10)
-	orl   $TS_COMPAT,threadinfo_status(%r10)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
+	orl   $TS_COMPAT,TI_status(%r10)
+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
+		TI_flags(%r10)
 	CFI_REMEMBER_STATE
 	jnz   cstar_tracesys
 cstar_do_call:
@@ -253,9 +255,9 @@ cstar_do_call:
 	GET_THREAD_INFO(%r10)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
+	testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
 	jnz int_ret_from_sys_call
-	andl $~TS_COMPAT,threadinfo_status(%r10)
+	andl $~TS_COMPAT,TI_status(%r10)
 	RESTORE_ARGS 1,-ARG_SKIP,1,1,1
 	movl RIP-ARGOFFSET(%rsp),%ecx
 	CFI_REGISTER rip,rcx
@@ -333,8 +335,9 @@ ENTRY(ia32_syscall)
 	   this could be a problem. */
 	SAVE_ARGS 0,0,1
 	GET_THREAD_INFO(%r10)
-	orl   $TS_COMPAT,threadinfo_status(%r10)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
+	orl   $TS_COMPAT,TI_status(%r10)
+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
+		TI_flags(%r10)
 	jnz ia32_tracesys
 ia32_do_syscall:
 	cmpl $(IA32_NR_syscalls-1),%eax
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 3295e7c08fe7..bacf5deeec2d 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -34,7 +34,7 @@ int main(void)
 	ENTRY(pid);
 	BLANK();
 #undef ENTRY
-#define ENTRY(entry) DEFINE(threadinfo_ ## entry, offsetof(struct thread_info, entry))
+#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry))
 	ENTRY(flags);
 	ENTRY(addr_limit);
 	ENTRY(preempt_count);
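
Context for the rename above: the TI_* constants are produced by the kernel's asm-offsets mechanism, where a C file full of DEFINE(sym, offsetof(...)) entries is compiled to assembly and a build script scrapes the markers into a generated header that the .S files include. A minimal C sketch of the idea, assuming the kbuild-style DEFINE marker (the actual scraping is done by the kernel build system and is not shown here):

	#include <stddef.h>

	struct thread_info {
		unsigned long flags;
		unsigned long addr_limit;
		int preempt_count;
	};

	/* Emits a "->TI_flags <offset>" marker into the generated assembly;
	 * a script turns each marker into "#define TI_flags <offset>". */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	void common(void)
	{
		DEFINE(TI_flags, offsetof(struct thread_info, flags));
		DEFINE(TI_addr_limit, offsetof(struct thread_info, addr_limit));
		DEFINE(TI_preempt_count, offsetof(struct thread_info, preempt_count));
	}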
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 07d69f262337..466b9284ed2f 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -168,13 +168,13 @@ ENTRY(ret_from_fork)
 	CFI_ADJUST_CFA_OFFSET -4
 	call schedule_tail
 	GET_THREAD_INFO(%rcx)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
 	jnz rff_trace
 rff_action:
 	RESTORE_REST
 	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
 	je   int_ret_from_sys_call
-	testl $_TIF_IA32,threadinfo_flags(%rcx)
+	testl $_TIF_IA32,TI_flags(%rcx)
 	jnz  int_ret_from_sys_call
 	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
 	jmp ret_from_sys_call
@@ -243,7 +243,8 @@ ENTRY(system_call_after_swapgs)
 	movq  %rcx,RIP-ARGOFFSET(%rsp)
 	CFI_REL_OFFSET rip,RIP-ARGOFFSET
 	GET_THREAD_INFO(%rcx)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
+		TI_flags(%rcx)
 	jnz tracesys
 	cmpq $__NR_syscall_max,%rax
 	ja badsys
@@ -262,7 +263,7 @@ sysret_check:
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	movl threadinfo_flags(%rcx),%edx
+	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
 	jnz  sysret_careful
 	CFI_REMEMBER_STATE
@@ -347,10 +348,10 @@ int_ret_from_sys_call:
 int_with_check:
 	LOCKDEP_SYS_EXIT_IRQ
 	GET_THREAD_INFO(%rcx)
-	movl threadinfo_flags(%rcx),%edx
+	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
 	jnz   int_careful
-	andl    $~TS_COMPAT,threadinfo_status(%rcx)
+	andl    $~TS_COMPAT,TI_status(%rcx)
 	jmp   retint_swapgs
 
 	/* Either reschedule or signal or syscall exit tracking needed. */
@@ -558,7 +559,7 @@ retint_with_reschedule:
 	movl $_TIF_WORK_MASK,%edi
 retint_check:
 	LOCKDEP_SYS_EXIT_IRQ
-	movl threadinfo_flags(%rcx),%edx
+	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
 	CFI_REMEMBER_STATE
 	jnz  retint_careful
@@ -654,9 +655,9 @@ retint_signal:
 	/* Returning to kernel space. Check if we need preemption */
 	/* rcx:	 threadinfo. interrupts off. */
 ENTRY(retint_kernel)
-	cmpl $0,threadinfo_preempt_count(%rcx)
+	cmpl $0,TI_preempt_count(%rcx)
 	jnz  retint_restore_args
-	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
+	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
 	jnc  retint_restore_args
 	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
 	jnc  retint_restore_args
@@ -819,7 +820,7 @@ paranoid_restore\trace:
 	jmp irq_return
 paranoid_userspace\trace:
 	GET_THREAD_INFO(%rcx)
-	movl threadinfo_flags(%rcx),%ebx
+	movl TI_flags(%rcx),%ebx
 	andl $_TIF_WORK_MASK,%ebx
 	jz paranoid_swapgs\trace
 	movq %rsp,%rdi			/* &pt_regs */
@@ -917,7 +918,7 @@ error_exit:
 	testl %eax,%eax
 	jne  retint_kernel
 	LOCKDEP_SYS_EXIT_IRQ
-	movl  threadinfo_flags(%rcx),%edx
+	movl  TI_flags(%rcx),%edx
 	movl $_TIF_WORK_MASK,%edi
 	andl %edi,%edx
 	jnz  retint_careful
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3c36f92160c9..4a775d001957 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -513,6 +513,7 @@ void __init tsc_init(void)
 	 */
 	for_each_possible_cpu(cpu)
 		set_cyc2ns_scale(cpu_khz, cpu);
+	use_tsc_delay();
 
 	if (tsc_disabled > 0)
 		return;
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 76f60f52a885..83226e0a7ce4 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -4,8 +4,8 @@
 
 obj-$(CONFIG_SMP) := msr-on-cpu.o
 
-lib-y := delay_$(BITS).o
-lib-y += usercopy_$(BITS).o getuser_$(BITS).o putuser_$(BITS).o
+lib-y := delay.o
+lib-y += usercopy_$(BITS).o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 
 ifeq ($(CONFIG_X86_32),y)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index ee1c3f635157..7eaaf0123b4d 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -40,7 +40,7 @@ ENTRY(copy_to_user)
 	movq %rdi,%rcx
 	addq %rdx,%rcx
 	jc  bad_to_user
-	cmpq threadinfo_addr_limit(%rax),%rcx
+	cmpq TI_addr_limit(%rax),%rcx
 	jae bad_to_user
 	xorl %eax,%eax	/* clear zero flag */
 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
@@ -65,7 +65,7 @@ ENTRY(copy_from_user)
 	movq %rsi,%rcx
 	addq %rdx,%rcx
 	jc   bad_from_user
-	cmpq threadinfo_addr_limit(%rax),%rcx
+	cmpq TI_addr_limit(%rax),%rcx
 	jae  bad_from_user
 	movl $1,%ecx	/* set zero flag */
 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
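
The four instructions around each changed line above are the entire user-range check: the addq computes the end of the copy, the jc catches address-space wrap-around, and the compare against TI_addr_limit rejects addresses beyond the thread's limit. An equivalent C sketch (illustrative only, not kernel code):

	#include <stdbool.h>

	static bool copy_range_ok(unsigned long addr, unsigned long size,
				  unsigned long addr_limit)
	{
		unsigned long end = addr + size;

		if (end < addr)			/* wrapped: the "jc bad_*_user" */
			return false;
		return end < addr_limit;	/* "cmpq TI_addr_limit...; jae bad" */
	}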
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay.c
index ef691316f8b6..f4568605d7d5 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay.c
@@ -29,7 +29,7 @@
 /* simple loop based delay: */
 static void delay_loop(unsigned long loops)
 {
-	__asm__ __volatile__(
+	asm volatile(
 		"	test %0,%0	\n"
 		"	jz 3f		\n"
 		"	jmp 1f		\n"
@@ -38,9 +38,9 @@ static void delay_loop(unsigned long loops)
 		"1:	jmp 2f		\n"
 
 		".align 16		\n"
-		"2:	decl %0		\n"
+		"2:	dec %0		\n"
 		"	jnz 2b		\n"
-		"3:	decl %0		\n"
+		"3:	dec %0		\n"
 
 		: /* we don't need output */
 		:"a" (loops)
@@ -98,7 +98,7 @@ void use_tsc_delay(void)
 int __devinit read_current_timer(unsigned long *timer_val)
 {
 	if (delay_fn == delay_tsc) {
-		rdtscl(*timer_val);
+		rdtscll(*timer_val);
 		return 0;
 	}
 	return -1;
@@ -108,31 +108,30 @@ void __delay(unsigned long loops)
 {
 	delay_fn(loops);
 }
+EXPORT_SYMBOL(__delay);
 
 inline void __const_udelay(unsigned long xloops)
 {
 	int d0;
 
 	xloops *= 4;
-	__asm__("mull %0"
+	asm("mull %%edx"
 		:"=d" (xloops), "=&a" (d0)
 		:"1" (xloops), "0"
 		(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
 
 	__delay(++xloops);
 }
+EXPORT_SYMBOL(__const_udelay);
 
 void __udelay(unsigned long usecs)
 {
 	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
 }
+EXPORT_SYMBOL(__udelay);
 
 void __ndelay(unsigned long nsecs)
 {
 	__const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
 }
-
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
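
The magic constants above are fixed-point arithmetic: 0x10c7 is 2^32/10^6 rounded up, so __udelay() hands __const_udelay() "microseconds scaled by 2^32", and the mull keeps only the high 32 bits of the product with loops_per_jiffy * HZ, which is the implicit >>32 converting back to loop counts. A C rendering of the same arithmetic (a sketch; the function name is illustrative):

	#include <stdint.h>

	static unsigned long usecs_to_loops(uint32_t usecs,
					    uint32_t loops_per_jiffy, uint32_t hz)
	{
		uint32_t xloops = usecs * 0x000010c7u; /* 2^32 / 10^6, rounded up */

		xloops *= 4;			/* compensates for the HZ/4 below */
		/* the "mull": keep the high half of a 32x32->64 multiply (>>32) */
		xloops = ((uint64_t)xloops * (loops_per_jiffy * (hz / 4))) >> 32;
		return xloops + 1;		/* __delay(++xloops) rounds up */
	}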
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
deleted file mode 100644
index 4c441be92641..000000000000
--- a/arch/x86/lib/delay_64.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- *	Precise Delay Loops for x86-64
- *
- *	Copyright (C) 1993 Linus Torvalds
- *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
- *
- *	The __delay function must _NOT_ be inlined as its execution time
- *	depends wildly on alignment on many x86 processors.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/timex.h>
-#include <linux/preempt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-
-#include <asm/delay.h>
-#include <asm/msr.h>
-
-#ifdef CONFIG_SMP
-#include <asm/smp.h>
-#endif
-
-int __devinit read_current_timer(unsigned long *timer_value)
-{
-	rdtscll(*timer_value);
-	return 0;
-}
-
-void __delay(unsigned long loops)
-{
-	unsigned bclock, now;
-	int cpu;
-
-	preempt_disable();
-	cpu = smp_processor_id();
-	rdtscl(bclock);
-	for (;;) {
-		rdtscl(now);
-		if ((now - bclock) >= loops)
-			break;
-
-		/* Allow RT tasks to run */
-		preempt_enable();
-		rep_nop();
-		preempt_disable();
-
-		/*
-		 * It is possible that we moved to another CPU, and
-		 * since TSC's are per-cpu we need to calculate
-		 * that. The delay must guarantee that we wait "at
-		 * least" the amount of time. Being moved to another
-		 * CPU could make the wait longer but we just need to
-		 * make sure we waited long enough. Rebalance the
-		 * counter for this CPU.
-		 */
-		if (unlikely(cpu != smp_processor_id())) {
-			loops -= (now - bclock);
-			cpu = smp_processor_id();
-			rdtscl(bclock);
-		}
-	}
-	preempt_enable();
-}
-EXPORT_SYMBOL(__delay);
-
-inline void __const_udelay(unsigned long xloops)
-{
-	__delay(((xloops * HZ *
-		cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
-}
-EXPORT_SYMBOL(__const_udelay);
-
-void __udelay(unsigned long usecs)
-{
-	__const_udelay(usecs * 0x000010c7);  /* 2**32 / 1000000 (rounded up) */
-}
-EXPORT_SYMBOL(__udelay);
-
-void __ndelay(unsigned long nsecs)
-{
-	__const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
-}
-EXPORT_SYMBOL(__ndelay);
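
The migration-aware TSC wait deleted above is not lost: it survives in essentially this shape as delay_tsc() in the unified arch/x86/lib/delay.c (selected at boot by the use_tsc_delay() call added to tsc_init() earlier in this merge). A simplified sketch of the loop it preserves, using the kernel helpers the original used:

	static void delay_tsc(unsigned long loops)
	{
		unsigned long bclock, now;
		int cpu;

		preempt_disable();	/* TSCs are per-CPU: pin while sampling */
		cpu = smp_processor_id();
		rdtscl(bclock);
		for (;;) {
			rdtscl(now);
			if ((now - bclock) >= loops)
				break;

			preempt_enable();	/* let RT tasks run between polls */
			rep_nop();
			preempt_disable();

			if (unlikely(cpu != smp_processor_id())) {
				/* migrated: credit time already waited, resample */
				loops -= (now - bclock);
				cpu = smp_processor_id();
				rdtscl(bclock);
			}
		}
		preempt_enable();
	}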
diff --git a/arch/x86/lib/getuser_64.S b/arch/x86/lib/getuser.S
index 5448876261f8..ad374003742f 100644
--- a/arch/x86/lib/getuser_64.S
+++ b/arch/x86/lib/getuser.S
@@ -3,6 +3,7 @@
  *
  * (C) Copyright 1998 Linus Torvalds
  * (C) Copyright 2005 Andi Kleen
+ * (C) Copyright 2008 Glauber Costa
  *
  * These functions have a non-standard call interface
  * to make them more efficient, especially as they
@@ -13,14 +14,13 @@
 /*
  * __get_user_X
  *
- * Inputs:	%rcx contains the address.
+ * Inputs:	%[r|e]ax contains the address.
  * The register is modified, but all changes are undone
  * before returning because the C code doesn't know about it.
  *
- * Outputs:	%rax is error code (0 or -EFAULT)
- *		%rdx contains zero-extended value
+ * Outputs:	%[r|e]ax is error code (0 or -EFAULT)
+ *		%[r|e]dx contains zero-extended value
  *
- * %r8 is destroyed.
  *
  * These functions should not modify any other registers,
  * as they get called from within inline assembly.
@@ -32,78 +32,73 @@
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
+#include <asm/asm.h>
 
 	.text
 ENTRY(__get_user_1)
 	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	cmpq threadinfo_addr_limit(%r8),%rcx
+	GET_THREAD_INFO(%_ASM_DX)
+	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
-1:	movzb (%rcx),%edx
-	xorl %eax,%eax
+1:	movzb (%_ASM_AX),%edx
+	xor %eax,%eax
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
 	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	addq $1,%rcx
-	jc 20f
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae 20f
-	decq %rcx
-2:	movzwl (%rcx),%edx
-	xorl %eax,%eax
+	add $1,%_ASM_AX
+	jc bad_get_user
+	GET_THREAD_INFO(%_ASM_DX)
+	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+	jae bad_get_user
+2:	movzwl -1(%_ASM_AX),%edx
+	xor %eax,%eax
 	ret
-20:	decq %rcx
-	jmp bad_get_user
 	CFI_ENDPROC
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
 	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	addq $3,%rcx
-	jc 30f
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae 30f
-	subq $3,%rcx
-3:	movl (%rcx),%edx
-	xorl %eax,%eax
+	add $3,%_ASM_AX
+	jc bad_get_user
+	GET_THREAD_INFO(%_ASM_DX)
+	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+	jae bad_get_user
+3:	mov -3(%_ASM_AX),%edx
+	xor %eax,%eax
 	ret
-30:	subq $3,%rcx
-	jmp bad_get_user
 	CFI_ENDPROC
 ENDPROC(__get_user_4)
 
+#ifdef CONFIG_X86_64
 ENTRY(__get_user_8)
 	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	addq $7,%rcx
-	jc 40f
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae 40f
-	subq $7,%rcx
-4:	movq (%rcx),%rdx
-	xorl %eax,%eax
+	add $7,%_ASM_AX
+	jc bad_get_user
+	GET_THREAD_INFO(%_ASM_DX)
+	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+	jae bad_get_user
+4:	movq -7(%_ASM_AX),%_ASM_DX
+	xor %eax,%eax
 	ret
-40:	subq $7,%rcx
-	jmp bad_get_user
 	CFI_ENDPROC
 ENDPROC(__get_user_8)
+#endif
 
 bad_get_user:
 	CFI_STARTPROC
-	xorl %edx,%edx
-	movq $(-EFAULT),%rax
+	xor %edx,%edx
+	mov $(-EFAULT),%_ASM_AX
 	ret
 	CFI_ENDPROC
 END(bad_get_user)
 
 .section __ex_table,"a"
-	.quad 1b,bad_get_user
-	.quad 2b,bad_get_user
-	.quad 3b,bad_get_user
-	.quad 4b,bad_get_user
-.previous
+	_ASM_PTR 1b,bad_get_user
+	_ASM_PTR 2b,bad_get_user
+	_ASM_PTR 3b,bad_get_user
+#ifdef CONFIG_X86_64
+	_ASM_PTR 4b,bad_get_user
+#endif
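
The register shuffle above is what lets one source file serve both widths: the address now arrives in %eax/%rax (previously %rcx on 64-bit), the error and value come back in %eax/%rax and %edx/%rdx, and %r8 is no longer clobbered. On the C side the stubs are invoked like this (taken from the unified uaccess.h later in this diff):

	/* error lands in [r|e]ax, zero-extended value in [r|e]dx */
	#define __get_user_x(size, ret, x, ptr)		\
		asm volatile("call __get_user_" #size	\
			     : "=a" (ret), "=d" (x)	\
			     : "0" (ptr))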
diff --git a/arch/x86/lib/getuser_32.S b/arch/x86/lib/getuser_32.S
deleted file mode 100644
index 6d84b53f12a2..000000000000
--- a/arch/x86/lib/getuser_32.S
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * __get_user functions.
- *
- * (C) Copyright 1998 Linus Torvalds
- *
- * These functions have a non-standard call interface
- * to make them more efficient, especially as they
- * return an error value in addition to the "real"
- * return value.
- */
-#include <linux/linkage.h>
-#include <asm/dwarf2.h>
-#include <asm/thread_info.h>
-
-
-/*
- * __get_user_X
- *
- * Inputs:	%eax contains the address
- *
- * Outputs:	%eax is error code (0 or -EFAULT)
- *		%edx contains zero-extended value
- *
- * These functions should not modify any other registers,
- * as they get called from within inline assembly.
- */
-
-.text
-ENTRY(__get_user_1)
-	CFI_STARTPROC
-	GET_THREAD_INFO(%edx)
-	cmpl TI_addr_limit(%edx),%eax
-	jae bad_get_user
-1:	movzbl (%eax),%edx
-	xorl %eax,%eax
-	ret
-	CFI_ENDPROC
-ENDPROC(__get_user_1)
-
-ENTRY(__get_user_2)
-	CFI_STARTPROC
-	addl $1,%eax
-	jc bad_get_user
-	GET_THREAD_INFO(%edx)
-	cmpl TI_addr_limit(%edx),%eax
-	jae bad_get_user
-2:	movzwl -1(%eax),%edx
-	xorl %eax,%eax
-	ret
-	CFI_ENDPROC
-ENDPROC(__get_user_2)
-
-ENTRY(__get_user_4)
-	CFI_STARTPROC
-	addl $3,%eax
-	jc bad_get_user
-	GET_THREAD_INFO(%edx)
-	cmpl TI_addr_limit(%edx),%eax
-	jae bad_get_user
-3:	movl -3(%eax),%edx
-	xorl %eax,%eax
-	ret
-	CFI_ENDPROC
-ENDPROC(__get_user_4)
-
-bad_get_user:
-	CFI_STARTPROC
-	xorl %edx,%edx
-	movl $-14,%eax
-	ret
-	CFI_ENDPROC
-END(bad_get_user)
-
-.section __ex_table,"a"
-	.long 1b,bad_get_user
-	.long 2b,bad_get_user
-	.long 3b,bad_get_user
-.previous
diff --git a/arch/x86/lib/putuser_32.S b/arch/x86/lib/putuser.S
index f58fba109d18..36b0d15ae6e9 100644
--- a/arch/x86/lib/putuser_32.S
+++ b/arch/x86/lib/putuser.S
@@ -2,6 +2,8 @@
  * __put_user functions.
  *
  * (C) Copyright 2005 Linus Torvalds
+ * (C) Copyright 2005 Andi Kleen
+ * (C) Copyright 2008 Glauber Costa
  *
  * These functions have a non-standard call interface
  * to make them more efficient, especially as they
@@ -11,6 +13,8 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/asm.h>
 
 
 /*
@@ -26,73 +30,68 @@
  */
 
 #define ENTER	CFI_STARTPROC ; \
-		pushl %ebx ; \
-		CFI_ADJUST_CFA_OFFSET 4 ; \
-		CFI_REL_OFFSET ebx, 0 ; \
-		GET_THREAD_INFO(%ebx)
-#define EXIT	popl %ebx ; \
-		CFI_ADJUST_CFA_OFFSET -4 ; \
-		CFI_RESTORE ebx ; \
-		ret ; \
+		GET_THREAD_INFO(%_ASM_BX)
+#define EXIT	ret ; \
 		CFI_ENDPROC
 
 .text
 ENTRY(__put_user_1)
 	ENTER
-	cmpl TI_addr_limit(%ebx),%ecx
+	cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
 	jae bad_put_user
-1:	movb %al,(%ecx)
-	xorl %eax,%eax
+1:	movb %al,(%_ASM_CX)
+	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
 	ENTER
-	movl TI_addr_limit(%ebx),%ebx
-	subl $1,%ebx
-	cmpl %ebx,%ecx
+	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+	sub $1,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
-2:	movw %ax,(%ecx)
-	xorl %eax,%eax
+2:	movw %ax,(%_ASM_CX)
+	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
 	ENTER
-	movl TI_addr_limit(%ebx),%ebx
-	subl $3,%ebx
-	cmpl %ebx,%ecx
+	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+	sub $3,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
-3:	movl %eax,(%ecx)
-	xorl %eax,%eax
+3:	movl %eax,(%_ASM_CX)
+	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
 	ENTER
-	movl TI_addr_limit(%ebx),%ebx
-	subl $7,%ebx
-	cmpl %ebx,%ecx
+	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+	sub $7,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
-4:	movl %eax,(%ecx)
-5:	movl %edx,4(%ecx)
-	xorl %eax,%eax
+4:	mov %_ASM_AX,(%_ASM_CX)
+#ifdef CONFIG_X86_32
+5:	movl %edx,4(%_ASM_CX)
+#endif
+	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_8)
 
 bad_put_user:
-	CFI_STARTPROC simple
-	CFI_DEF_CFA esp, 2*4
-	CFI_OFFSET eip, -1*4
-	CFI_OFFSET ebx, -2*4
-	movl $-14,%eax
+	CFI_STARTPROC
+	movl $-EFAULT,%eax
 	EXIT
 END(bad_put_user)
 
 .section __ex_table,"a"
-	.long 1b,bad_put_user
-	.long 2b,bad_put_user
-	.long 3b,bad_put_user
-	.long 4b,bad_put_user
-	.long 5b,bad_put_user
+	_ASM_PTR 1b,bad_put_user
+	_ASM_PTR 2b,bad_put_user
+	_ASM_PTR 3b,bad_put_user
+	_ASM_PTR 4b,bad_put_user
+#ifdef CONFIG_X86_32
+	_ASM_PTR 5b,bad_put_user
+#endif
 .previous
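
Note the range check these put_user stubs use: instead of adding the size to the user pointer (which would need the carry check getuser.S does), they subtract size-1 from addr_limit once and compare against that. The same test in C (an illustrative sketch only):

	#include <stdbool.h>

	/* e.g. __put_user_2: mov TI_addr_limit,%bx ; sub $1,%bx ;
	 * cmp %bx,%cx ; jae bad_put_user */
	static bool put_range_ok(unsigned long ptr, unsigned long size,
				 unsigned long addr_limit)
	{
		return ptr < addr_limit - (size - 1);
	}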
diff --git a/arch/x86/lib/putuser_64.S b/arch/x86/lib/putuser_64.S
deleted file mode 100644
index 4989f5a8fa9b..000000000000
--- a/arch/x86/lib/putuser_64.S
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * __put_user functions.
- *
- * (C) Copyright 1998 Linus Torvalds
- * (C) Copyright 2005 Andi Kleen
- *
- * These functions have a non-standard call interface
- * to make them more efficient, especially as they
- * return an error value in addition to the "real"
- * return value.
- */
-
-/*
- * __put_user_X
- *
- * Inputs:	%rcx contains the address
- *		%rdx contains new value
- *
- * Outputs:	%rax is error code (0 or -EFAULT)
- *
- * %r8 is destroyed.
- *
- * These functions should not modify any other registers,
- * as they get called from within inline assembly.
- */
-
-#include <linux/linkage.h>
-#include <asm/dwarf2.h>
-#include <asm/page.h>
-#include <asm/errno.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-
-	.text
-ENTRY(__put_user_1)
-	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae bad_put_user
-1:	movb %dl,(%rcx)
-	xorl %eax,%eax
-	ret
-	CFI_ENDPROC
-ENDPROC(__put_user_1)
-
-ENTRY(__put_user_2)
-	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	addq $1,%rcx
-	jc 20f
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae 20f
-	decq %rcx
-2:	movw %dx,(%rcx)
-	xorl %eax,%eax
-	ret
-20:	decq %rcx
-	jmp bad_put_user
-	CFI_ENDPROC
-ENDPROC(__put_user_2)
-
-ENTRY(__put_user_4)
-	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	addq $3,%rcx
-	jc 30f
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae 30f
-	subq $3,%rcx
-3:	movl %edx,(%rcx)
-	xorl %eax,%eax
-	ret
-30:	subq $3,%rcx
-	jmp bad_put_user
-	CFI_ENDPROC
-ENDPROC(__put_user_4)
-
-ENTRY(__put_user_8)
-	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	addq $7,%rcx
-	jc 40f
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae 40f
-	subq $7,%rcx
-4:	movq %rdx,(%rcx)
-	xorl %eax,%eax
-	ret
-40:	subq $7,%rcx
-	jmp bad_put_user
-	CFI_ENDPROC
-ENDPROC(__put_user_8)
-
-bad_put_user:
-	CFI_STARTPROC
-	movq $(-EFAULT),%rax
-	ret
-	CFI_ENDPROC
-END(bad_put_user)
-
-.section __ex_table,"a"
-	.quad 1b,bad_put_user
-	.quad 2b,bad_put_user
-	.quad 3b,bad_put_user
-	.quad 4b,bad_put_user
-.previous
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 70939820c55f..97220321f39d 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -3,8 +3,10 @@
 
 #ifdef __ASSEMBLY__
 # define __ASM_FORM(x)	x
+# define __ASM_EX_SEC	.section __ex_table
 #else
 # define __ASM_FORM(x)	" " #x " "
+# define __ASM_EX_SEC	" .section __ex_table,\"a\"\n"
 #endif
 
 #ifdef CONFIG_X86_32
@@ -14,6 +16,7 @@
 #endif
 
 #define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
+#define __ASM_REG(reg)		__ASM_SEL(e##reg, r##reg)
 
 #define _ASM_PTR	__ASM_SEL(.long, .quad)
 #define _ASM_ALIGN	__ASM_SEL(.balign 4, .balign 8)
@@ -24,10 +27,14 @@
 #define _ASM_ADD	__ASM_SIZE(add)
 #define _ASM_SUB	__ASM_SIZE(sub)
 #define _ASM_XADD	__ASM_SIZE(xadd)
+#define _ASM_AX		__ASM_REG(ax)
+#define _ASM_BX		__ASM_REG(bx)
+#define _ASM_CX		__ASM_REG(cx)
+#define _ASM_DX		__ASM_REG(dx)
 
 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
-	" .section __ex_table,\"a\"\n" \
+	__ASM_EX_SEC \
	 _ASM_ALIGN "\n" \
	 _ASM_PTR #from "," #to "\n" \
	 " .previous\n"
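
__ASM_REG builds on the existing __ASM_SEL() selector in this header, which picks its first argument on 32-bit and its second on 64-bit. A stand-alone preprocessor sketch of the net effect (the CONFIG_X86_32 handling is simplified here):

	#ifdef CONFIG_X86_32
	# define __ASM_SEL(a, b) __ASM_FORM(a)
	#else
	# define __ASM_SEL(a, b) __ASM_FORM(b)
	#endif

	#define __ASM_REG(reg)	__ASM_SEL(e##reg, r##reg)
	#define _ASM_AX		__ASM_REG(ax)

	/* so "cmp TI_addr_limit(%_ASM_DX),%_ASM_AX" in getuser.S assembles as
	 * "cmp TI_addr_limit(%edx),%eax" on 32-bit and as
	 * "cmp TI_addr_limit(%rdx),%rax" on 64-bit */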
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h
index bb80880c834b..409a649204aa 100644
--- a/include/asm-x86/delay.h
+++ b/include/asm-x86/delay.h
@@ -26,10 +26,6 @@ extern void __delay(unsigned long loops);
 	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
 	__ndelay(n))
 
-#ifdef CONFIG_X86_32
 void use_tsc_delay(void);
-#else
-#define use_tsc_delay() {}
-#endif
 
 #endif /* _ASM_X86_DELAY_H */
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 9fefd2947e78..f6fa4d841bbc 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -1,5 +1,453 @@
+#ifndef _ASM_UACCES_H_
+#define _ASM_UACCES_H_
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <linux/prefetch.h>
+#include <linux/string.h>
+#include <asm/asm.h>
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#define KERNEL_DS	MAKE_MM_SEG(-1UL)
+#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)	((a).seg == (b).seg)
+
+#define __addr_ok(addr)					\
+	((unsigned long __force)(addr) <		\
+	 (current_thread_info()->addr_limit.seg))
+
+/*
+ * Test whether a block of memory is a valid user space address.
+ * Returns 0 if the range is valid, nonzero otherwise.
+ *
+ * This is equivalent to the following test:
+ * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
+ *
+ * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
+ */
+
+#define __range_not_ok(addr, size)					\
+({									\
+	unsigned long flag, roksum;					\
+	__chk_user_ptr(addr);						\
+	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
+	    : "=&r" (flag), "=r" (roksum)				\
+	    : "1" (addr), "g" ((long)(size)),				\
+	      "rm" (current_thread_info()->addr_limit.seg));		\
+	flag;								\
+})
+
+/**
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
+ *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ *        to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+
+extern int __get_user_1(void);
+extern int __get_user_2(void);
+extern int __get_user_4(void);
+extern int __get_user_8(void);
+extern int __get_user_bad(void);
+
+#define __get_user_x(size, ret, x, ptr)		\
+	asm volatile("call __get_user_" #size	\
+		     : "=a" (ret),"=d" (x)	\
+		     : "0" (ptr))		\
+
+/* Careful: we have to cast the result to the type of the pointer
+ * for sign reasons */
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#ifdef CONFIG_X86_32
+#define __get_user_8(__ret_gu, __val_gu, ptr)		\
+		__get_user_x(X, __ret_gu, __val_gu, ptr)
+#else
+#define __get_user_8(__ret_gu, __val_gu, ptr)		\
+		__get_user_x(8, __ret_gu, __val_gu, ptr)
+#endif
+
+#define get_user(x, ptr)					\
+({								\
+	int __ret_gu;						\
+	unsigned long __val_gu;					\
+	__chk_user_ptr(ptr);					\
+	switch (sizeof(*(ptr))) {				\
+	case 1:							\
+		__get_user_x(1, __ret_gu, __val_gu, ptr);	\
+		break;						\
+	case 2:							\
+		__get_user_x(2, __ret_gu, __val_gu, ptr);	\
+		break;						\
+	case 4:							\
+		__get_user_x(4, __ret_gu, __val_gu, ptr);	\
+		break;						\
+	case 8:							\
+		__get_user_8(__ret_gu, __val_gu, ptr);		\
+		break;						\
+	default:						\
+		__get_user_x(X, __ret_gu, __val_gu, ptr);	\
+		break;						\
+	}							\
+	(x) = (__typeof__(*(ptr)))__val_gu;			\
+	__ret_gu;						\
+})
+
+#define __put_user_x(size, x, ptr, __ret_pu)			\
+	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
+		     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+
+
+#ifdef CONFIG_X86_32
+#define __put_user_u64(x, addr, err)					\
+	asm volatile("1:	movl %%eax,0(%2)\n"			\
+		     "2:	movl %%edx,4(%2)\n"			\
+		     "3:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "4:	movl %3,%0\n"				\
+		     "	jmp 3b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 4b)				\
+		     _ASM_EXTABLE(2b, 4b)				\
+		     : "=r" (err)					\
+		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+
+#define __put_user_x8(x, ptr, __ret_pu)				\
+	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
+		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+#else
+#define __put_user_u64(x, ptr, retval) \
+	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
+#endif
+
+extern void __put_user_bad(void);
+
+/*
+ * Strange magic calling convention: pointer in %ecx,
+ * value in %eax(:%edx), return value in %eax. clobbers %rbx
+ */
+extern void __put_user_1(void);
+extern void __put_user_2(void);
+extern void __put_user_4(void);
+extern void __put_user_8(void);
+
+#ifdef CONFIG_X86_WP_WORKS_OK
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr)					\
+({								\
+	int __ret_pu;						\
+	__typeof__(*(ptr)) __pu_val;				\
+	__chk_user_ptr(ptr);					\
+	__pu_val = x;						\
+	switch (sizeof(*(ptr))) {				\
+	case 1:							\
+		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
+		break;						\
+	case 2:							\
+		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
+		break;						\
+	case 4:							\
+		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
+		break;						\
+	case 8:							\
+		__put_user_x8(__pu_val, ptr, __ret_pu);		\
+		break;						\
+	default:						\
+		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
+		break;						\
+	}							\
+	__ret_pu;						\
+})
+
+#define __put_user_size(x, ptr, size, retval, errret)			\
+do {									\
+	retval = 0;							\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1:								\
+		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
+		break;							\
+	case 2:								\
+		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
+		break;							\
+	case 4:								\
+		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
+		break;							\
+	case 8:								\
+		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
+		break;							\
+	default:							\
+		__put_user_bad();					\
+	}								\
+} while (0)
+
+#else
+
+#define __put_user_size(x, ptr, size, retval, errret)			\
+do {									\
+	__typeof__(*(ptr))__pus_tmp = x;				\
+	retval = 0;							\
+									\
+	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
+		retval = errret;					\
+} while (0)
+
+#define put_user(x, ptr)					\
+({								\
+	int __ret_pu;						\
+	__typeof__(*(ptr))__pus_tmp = x;			\
+	__ret_pu = 0;						\
+	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
+				       sizeof(*(ptr))) != 0))	\
+		__ret_pu = -EFAULT;				\
+	__ret_pu;						\
+})
+#endif
+
+#ifdef CONFIG_X86_32
+#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
+#else
+#define __get_user_asm_u64(x, ptr, retval, errret) \
+	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#endif
+
+#define __get_user_size(x, ptr, size, retval, errret)			\
+do {									\
+	retval = 0;							\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1:								\
+		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
+		break;							\
+	case 2:								\
+		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
+		break;							\
+	case 4:								\
+		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
+		break;							\
+	case 8:								\
+		__get_user_asm_u64(x, ptr, retval, errret);		\
+		break;							\
+	default:							\
+		(x) = __get_user_bad();					\
+	}								\
+} while (0)
+
+#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
+	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:	mov %3,%0\n"				\
+		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r" (err), ltype(x)				\
+		     : "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_nocheck(x, ptr, size)			\
+({								\
+	long __pu_err;						\
+	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
+	__pu_err;						\
+})
+
+#define __get_user_nocheck(x, ptr, size)				\
+({									\
+	long __gu_err;							\
+	unsigned long __gu_val;						\
+	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
+	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	__gu_err;							\
+})
+
+/* FIXME: this hack is definitely wrong -AK */
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
+	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:	mov %3,%0\n"				\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r"(err)					\
+		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+
+#define __get_user(x, ptr)						\
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+
+#define __put_user(x, ptr)						\
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+
+/*
+ * movsl can be slow when source and dest are not both 8-byte aligned
+ */
+#ifdef CONFIG_X86_INTEL_USERCOPY
+extern struct movsl_mask {
+	int mask;
+} ____cacheline_aligned_in_smp movsl_mask;
+#endif
+
+#define ARCH_HAS_NOCACHE_UACCESS 1
+
 #ifdef CONFIG_X86_32
 # include "uaccess_32.h"
 #else
+# define ARCH_HAS_SEARCH_EXTABLE
 # include "uaccess_64.h"
 #endif
+
+#endif
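
The __range_not_ok() carry trick in the new header deserves a gloss: "add ; sbb %0,%0" turns the carry out of addr+size into an all-ones flag, and the following "cmp ; sbb $0,%0" folds in "end above addr_limit", so a single unsigned long answers what is really a 33-bit (65-bit on x86-64) question. A plain C equivalent (a sketch with illustrative names):

	static int range_not_ok(unsigned long addr, unsigned long size,
				unsigned long limit)
	{
		unsigned long end = addr + size;

		if (end < addr)		/* carry out of the add: "sbb %0,%0" */
			return 1;
		if (end > limit)	/* "cmp %1,%4 ; sbb $0,%0" */
			return 1;
		return 0;		/* access_ok() requires this == 0 */
	}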
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 8e7595c1f34e..6fdef39a0bcb 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -11,426 +11,6 @@
11#include <asm/asm.h> 11#include <asm/asm.h>
12#include <asm/page.h> 12#include <asm/page.h>
13 13
14#define VERIFY_READ 0
15#define VERIFY_WRITE 1
16
17/*
18 * The fs value determines whether argument validity checking should be
19 * performed or not. If get_fs() == USER_DS, checking is performed, with
20 * get_fs() == KERNEL_DS, checking is bypassed.
21 *
22 * For historical reasons, these macros are grossly misnamed.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27
28#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL)
29#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
30
31#define get_ds() (KERNEL_DS)
32#define get_fs() (current_thread_info()->addr_limit)
33#define set_fs(x) (current_thread_info()->addr_limit = (x))
34
35#define segment_eq(a, b) ((a).seg == (b).seg)
36
37/*
38 * movsl can be slow when source and dest are not both 8-byte aligned
39 */
40#ifdef CONFIG_X86_INTEL_USERCOPY
41extern struct movsl_mask {
42 int mask;
43} ____cacheline_aligned_in_smp movsl_mask;
44#endif
45
46#define __addr_ok(addr) \
47 ((unsigned long __force)(addr) < \
48 (current_thread_info()->addr_limit.seg))
49
50/*
51 * Test whether a block of memory is a valid user space address.
52 * Returns 0 if the range is valid, nonzero otherwise.
53 *
54 * This is equivalent to the following test:
55 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
56 *
57 * This needs 33-bit arithmetic. We have a carry...
58 */
59#define __range_ok(addr, size) \
60({ \
61 unsigned long flag, roksum; \
62 __chk_user_ptr(addr); \
63 asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
64 :"=&r" (flag), "=r" (roksum) \
65 :"1" (addr), "g" ((int)(size)), \
66 "rm" (current_thread_info()->addr_limit.seg)); \
67 flag; \
68})
69
70/**
71 * access_ok: - Checks if a user space pointer is valid
72 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
73 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
74 * to write to a block, it is always safe to read from it.
75 * @addr: User space pointer to start of block to check
76 * @size: Size of block to check
77 *
78 * Context: User context only. This function may sleep.
79 *
80 * Checks if a pointer to a block of memory in user space is valid.
81 *
82 * Returns true (nonzero) if the memory block may be valid, false (zero)
83 * if it is definitely invalid.
84 *
85 * Note that, depending on architecture, this function probably just
86 * checks that the pointer is in the user space range - after calling
87 * this function, memory access functions may still return -EFAULT.
88 */
89#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
90
91/*
92 * The exception table consists of pairs of addresses: the first is the
93 * address of an instruction that is allowed to fault, and the second is
94 * the address at which the program should continue. No registers are
95 * modified, so it is entirely up to the continuation code to figure out
96 * what to do.
97 *
98 * All the routines below use bits of fixup code that are out of line
99 * with the main instruction path. This means when everything is well,
100 * we don't even have to jump over them. Further, they do not intrude
101 * on our cache or tlb entries.
102 */
103
104struct exception_table_entry {
105 unsigned long insn, fixup;
106};
107
108extern int fixup_exception(struct pt_regs *regs);
109
110/*
111 * These are the main single-value transfer routines. They automatically
112 * use the right size if we just have the right pointer type.
113 *
114 * This gets kind of ugly. We want to return _two_ values in "get_user()"
115 * and yet we don't want to do any pointers, because that is too much
116 * of a performance impact. Thus we have a few rather ugly macros here,
117 * and hide all the ugliness from the user.
118 *
119 * The "__xxx" versions of the user access functions are versions that
120 * do not verify the address space, that must have been done previously
121 * with a separate "access_ok()" call (this is used when we do multiple
122 * accesses to the same area of user memory).
123 */
124
125extern void __get_user_1(void);
126extern void __get_user_2(void);
127extern void __get_user_4(void);
128
129#define __get_user_x(size, ret, x, ptr) \
130 asm volatile("call __get_user_" #size \
131 :"=a" (ret),"=d" (x) \
132 :"0" (ptr))
133
134
135/* Careful: we have to cast the result to the type of the pointer
136 * for sign reasons */
137
138/**
139 * get_user: - Get a simple variable from user space.
140 * @x: Variable to store result.
141 * @ptr: Source address, in user space.
142 *
143 * Context: User context only. This function may sleep.
144 *
145 * This macro copies a single simple variable from user space to kernel
146 * space. It supports simple types like char and int, but not larger
147 * data types like structures or arrays.
148 *
149 * @ptr must have pointer-to-simple-variable type, and the result of
150 * dereferencing @ptr must be assignable to @x without a cast.
151 *
152 * Returns zero on success, or -EFAULT on error.
153 * On error, the variable @x is set to zero.
154 */
155#define get_user(x, ptr) \
156({ \
157 int __ret_gu; \
158 unsigned long __val_gu; \
159 __chk_user_ptr(ptr); \
160 switch (sizeof(*(ptr))) { \
161 case 1: \
162 __get_user_x(1, __ret_gu, __val_gu, ptr); \
163 break; \
164 case 2: \
165 __get_user_x(2, __ret_gu, __val_gu, ptr); \
166 break; \
167 case 4: \
168 __get_user_x(4, __ret_gu, __val_gu, ptr); \
169 break; \
170 default: \
171 __get_user_x(X, __ret_gu, __val_gu, ptr); \
172 break; \
173 } \
174 (x) = (__typeof__(*(ptr)))__val_gu; \
175 __ret_gu; \
176})
177
178extern void __put_user_bad(void);
179
180/*
181 * Strange magic calling convention: pointer in %ecx,
182 * value in %eax(:%edx), return value in %eax, no clobbers.
183 */
184extern void __put_user_1(void);
185extern void __put_user_2(void);
186extern void __put_user_4(void);
187extern void __put_user_8(void);
188
189#define __put_user_1(x, ptr) \
190 asm volatile("call __put_user_1" : "=a" (__ret_pu) \
191 : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
192
193#define __put_user_2(x, ptr) \
194 asm volatile("call __put_user_2" : "=a" (__ret_pu) \
195 : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
196
197#define __put_user_4(x, ptr) \
198 asm volatile("call __put_user_4" : "=a" (__ret_pu) \
199 : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
200
201#define __put_user_8(x, ptr) \
202 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
203 : "A" ((typeof(*(ptr)))(x)), "c" (ptr))
204
205#define __put_user_X(x, ptr) \
206 asm volatile("call __put_user_X" : "=a" (__ret_pu) \
207 : "c" (ptr))
208
209/**
210 * put_user: - Write a simple value into user space.
211 * @x: Value to copy to user space.
212 * @ptr: Destination address, in user space.
213 *
214 * Context: User context only. This function may sleep.
215 *
216 * This macro copies a single simple value from kernel space to user
217 * space. It supports simple types like char and int, but not larger
218 * data types like structures or arrays.
219 *
220 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
221 * to the result of dereferencing @ptr.
222 *
223 * Returns zero on success, or -EFAULT on error.
224 */
225#ifdef CONFIG_X86_WP_WORKS_OK
226
227#define put_user(x, ptr) \
228({ \
229 int __ret_pu; \
230 __typeof__(*(ptr)) __pu_val; \
231 __chk_user_ptr(ptr); \
232 __pu_val = x; \
233 switch (sizeof(*(ptr))) { \
234 case 1: \
235 __put_user_1(__pu_val, ptr); \
236 break; \
237 case 2: \
238 __put_user_2(__pu_val, ptr); \
239 break; \
240 case 4: \
241 __put_user_4(__pu_val, ptr); \
242 break; \
243 case 8: \
244 __put_user_8(__pu_val, ptr); \
245 break; \
246 default: \
247 __put_user_X(__pu_val, ptr); \
248 break; \
249 } \
250 __ret_pu; \
251})
252
253#else
254#define put_user(x, ptr) \
255({ \
256 int __ret_pu; \
257 __typeof__(*(ptr))__pus_tmp = x; \
258 __ret_pu = 0; \
259 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
260 sizeof(*(ptr))) != 0)) \
261 __ret_pu = -EFAULT; \
262 __ret_pu; \
263})
264
265
266#endif
267
268/**
269 * __get_user: - Get a simple variable from user space, with less checking.
270 * @x: Variable to store result.
271 * @ptr: Source address, in user space.
272 *
273 * Context: User context only. This function may sleep.
274 *
275 * This macro copies a single simple variable from user space to kernel
276 * space. It supports simple types like char and int, but not larger
277 * data types like structures or arrays.
278 *
279 * @ptr must have pointer-to-simple-variable type, and the result of
280 * dereferencing @ptr must be assignable to @x without a cast.
281 *
282 * Caller must check the pointer with access_ok() before calling this
283 * function.
284 *
285 * Returns zero on success, or -EFAULT on error.
286 * On error, the variable @x is set to zero.
287 */
288#define __get_user(x, ptr) \
289 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
290
291
292/**
293 * __put_user: - Write a simple value into user space, with less checking.
294 * @x: Value to copy to user space.
295 * @ptr: Destination address, in user space.
296 *
297 * Context: User context only. This function may sleep.
298 *
299 * This macro copies a single simple value from kernel space to user
300 * space. It supports simple types like char and int, but not larger
301 * data types like structures or arrays.
302 *
303 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
304 * to the result of dereferencing @ptr.
305 *
306 * Caller must check the pointer with access_ok() before calling this
307 * function.
308 *
309 * Returns zero on success, or -EFAULT on error.
310 */
311#define __put_user(x, ptr) \
312 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
313
314#define __put_user_nocheck(x, ptr, size) \
315({ \
316 long __pu_err; \
317 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
318 __pu_err; \
319})
320
321
322#define __put_user_u64(x, addr, err) \
323 asm volatile("1: movl %%eax,0(%2)\n" \
324 "2: movl %%edx,4(%2)\n" \
325 "3:\n" \
326 ".section .fixup,\"ax\"\n" \
327 "4: movl %3,%0\n" \
328 " jmp 3b\n" \
329 ".previous\n" \
330 _ASM_EXTABLE(1b, 4b) \
331 _ASM_EXTABLE(2b, 4b) \
332 : "=r" (err) \
333 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
334
335#ifdef CONFIG_X86_WP_WORKS_OK
336
337#define __put_user_size(x, ptr, size, retval, errret) \
338do { \
339 retval = 0; \
340 __chk_user_ptr(ptr); \
341 switch (size) { \
342 case 1: \
343 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
344 break; \
345 case 2: \
346 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
347 break; \
348 case 4: \
349 __put_user_asm(x, ptr, retval, "l", "", "ir", errret); \
350 break; \
351 case 8: \
352 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
353 break; \
354 default: \
355 __put_user_bad(); \
356 } \
357} while (0)
358
359#else
360
361#define __put_user_size(x, ptr, size, retval, errret) \
362do { \
363 __typeof__(*(ptr))__pus_tmp = x; \
364 retval = 0; \
365 \
366 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
367 retval = errret; \
368} while (0)
369
370#endif
371struct __large_struct { unsigned long buf[100]; };
372#define __m(x) (*(struct __large_struct __user *)(x))
373
374/*
375 * Tell gcc we read from memory instead of writing: this is because
376 * we do not write to any memory gcc knows about, so there are no
377 * aliasing issues.
378 */
379#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
380 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
381 "2:\n" \
382 ".section .fixup,\"ax\"\n" \
383 "3: movl %3,%0\n" \
384 " jmp 2b\n" \
385 ".previous\n" \
386 _ASM_EXTABLE(1b, 3b) \
387 : "=r"(err) \
388 : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
389
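For reference, a hand expansion of __put_user_asm for the 4-byte case (itype "l", empty rtype, ltype "ir"); val and uaddr are hypothetical, and the operand numbers follow the template above:

        long err = 0;
        asm volatile("1:        movl %1,%2\n"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:        movl %3,%0\n"
                     "  jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : "=r" (err)               /* %0 */
                     : "ir" (val),              /* %1 */
                       "m" (__m(uaddr)),        /* %2 */
                       "i" (-EFAULT), "0" (err));

On a fault at label 1, the exception table sends control to label 3, which loads -EFAULT into err and jumps back to the mainline at 2.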
390
391#define __get_user_nocheck(x, ptr, size) \
392({ \
393 long __gu_err; \
394 unsigned long __gu_val; \
395 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
396 (x) = (__typeof__(*(ptr)))__gu_val; \
397 __gu_err; \
398})
399
400extern long __get_user_bad(void);
401
402#define __get_user_size(x, ptr, size, retval, errret) \
403do { \
404 retval = 0; \
405 __chk_user_ptr(ptr); \
406 switch (size) { \
407 case 1: \
408 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
409 break; \
410 case 2: \
411 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
412 break; \
413 case 4: \
414 __get_user_asm(x, ptr, retval, "l", "", "=r", errret); \
415 break; \
416 default: \
417 (x) = __get_user_bad(); \
418 } \
419} while (0)
420
421#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
422 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
423 "2:\n" \
424 ".section .fixup,\"ax\"\n" \
425 "3: movl %3,%0\n" \
426 " xor"itype" %"rtype"1,%"rtype"1\n" \
427 " jmp 2b\n" \
428 ".previous\n" \
429 _ASM_EXTABLE(1b, 3b) \
430 : "=r" (err), ltype (x) \
431 : "m" (__m(addr)), "i" (errret), "0" (err))
432
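The matching 4-byte expansion of __get_user_asm (sketch; val and uaddr hypothetical) shows where the zero-on-error guarantee documented for __get_user comes from:

        long err = 0;
        unsigned long val;
        asm volatile("1:        movl %2,%1\n"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:        movl %3,%0\n"
                     "  xorl %1,%1\n"           /* zero the result on fault */
                     "  jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : "=r" (err), "=r" (val)
                     : "m" (__m(uaddr)), "i" (-EFAULT), "0" (err));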
433
434unsigned long __must_check __copy_to_user_ll 14unsigned long __must_check __copy_to_user_ll
435 (void __user *to, const void *from, unsigned long n); 15 (void __user *to, const void *from, unsigned long n);
436unsigned long __must_check __copy_from_user_ll 16unsigned long __must_check __copy_from_user_ll
@@ -576,8 +156,6 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
576 return __copy_from_user_ll(to, from, n); 156 return __copy_from_user_ll(to, from, n);
577} 157}
578 158
579#define ARCH_HAS_NOCACHE_UACCESS
580
581static __always_inline unsigned long __copy_from_user_nocache(void *to, 159static __always_inline unsigned long __copy_from_user_nocache(void *to,
582 const void __user *from, unsigned long n) 160 const void __user *from, unsigned long n)
583{ 161{
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index b8a2f4339903..4e3ec004e14a 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -9,265 +9,6 @@
9#include <linux/prefetch.h> 9#include <linux/prefetch.h>
10#include <asm/page.h> 10#include <asm/page.h>
11 11
12#define VERIFY_READ 0
13#define VERIFY_WRITE 1
14
15/*
16 * The fs value determines whether argument validity checking should be
17 * performed or not. If get_fs() == USER_DS, checking is performed; with
18 * get_fs() == KERNEL_DS, checking is bypassed.
19 *
20 * For historical reasons, these macros are grossly misnamed.
21 */
22
23#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
24
25#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
26#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
27
28#define get_ds() (KERNEL_DS)
29#define get_fs() (current_thread_info()->addr_limit)
30#define set_fs(x) (current_thread_info()->addr_limit = (x))
31
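These definitions are what the classic set_fs() idiom builds on; a sketch (not part of this patch):

        static void with_kernel_ds(void)
        {
                mm_segment_t old_fs = get_fs();

                set_fs(KERNEL_DS);      /* widen addr_limit: kernel
                                         * pointers now pass access_ok() */
                /* ... call code that expects __user pointers ... */
                set_fs(old_fs);         /* always restore */
        }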
32#define segment_eq(a, b) ((a).seg == (b).seg)
33
34#define __addr_ok(addr) (!((unsigned long)(addr) & \
35 (current_thread_info()->addr_limit.seg)))
36
37/*
38 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
39 */
40#define __range_not_ok(addr, size) \
41({ \
42 unsigned long flag, roksum; \
43 __chk_user_ptr(addr); \
44 asm("# range_ok\n\r" \
45 "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
46 : "=&r" (flag), "=r" (roksum) \
47 : "1" (addr), "g" ((long)(size)), \
48 "g" (current_thread_info()->addr_limit.seg)); \
49 flag; \
50})
51
52#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)
53
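Conceptually, the asm computes the following in plain C (sketch; limit stands for current_thread_info()->addr_limit.seg), with the wrap-around of addr + size caught by the carry that the sbbq instructions fold in:

        static inline int range_not_ok_c(unsigned long addr,
                                         unsigned long size,
                                         unsigned long limit)
        {
                unsigned long sum = addr + size;

                /* non-zero if the sum wrapped (the 65th bit) or if it
                 * ends beyond the segment limit */
                return (sum < addr) || (sum > limit);
        }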
54/*
55 * The exception table consists of pairs of addresses: the first is the
56 * address of an instruction that is allowed to fault, and the second is
57 * the address at which the program should continue. No registers are
58 * modified, so it is entirely up to the continuation code to figure out
59 * what to do.
60 *
61 * All the routines below use bits of fixup code that are out of line
62 * with the main instruction path. This means when everything is well,
63 * we don't even have to jump over them. Further, they do not intrude
64 * on our cache or tlb entries.
65 */
66
67struct exception_table_entry {
68 unsigned long insn, fixup;
69};
70
71extern int fixup_exception(struct pt_regs *regs);
72
73#define ARCH_HAS_SEARCH_EXTABLE
74
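A sketch of what fixup_exception() does with these pairs, simplified from the x86-64 implementation of the era (search_exception_tables() is the generic binary-search helper):

        int fixup_exception(struct pt_regs *regs)
        {
                const struct exception_table_entry *fixup;

                fixup = search_exception_tables(regs->ip);
                if (fixup) {
                        regs->ip = fixup->fixup; /* resume at fixup code */
                        return 1;
                }
                return 0;       /* genuine fault, not a tagged access */
        }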
75/*
76 * These are the main single-value transfer routines. They automatically
77 * use the right size if we just have the right pointer type.
78 *
79 * This gets kind of ugly. We want to return _two_ values in "get_user()"
80 * and yet we don't want to pass pointers around, because that costs
81 * too much performance. Thus we have a few rather ugly macros here,
82 * and hide all the ugliness from the user.
83 *
84 * The "__xxx" versions of the user access functions are versions that
85 * do not verify the address space, that must have been done previously
86 * with a separate "access_ok()" call (this is used when we do multiple
87 * accesses to the same area of user memory).
88 */
89
90#define __get_user_x(size, ret, x, ptr) \
91 asm volatile("call __get_user_" #size \
92 : "=a" (ret),"=d" (x) \
93 : "c" (ptr) \
94 : "r8")
95
96/* Careful: we have to cast the result to the type of the pointer
97 * for sign reasons */
98
99#define get_user(x, ptr) \
100({ \
101 unsigned long __val_gu; \
102 int __ret_gu; \
103 __chk_user_ptr(ptr); \
104 switch (sizeof(*(ptr))) { \
105 case 1: \
106 __get_user_x(1, __ret_gu, __val_gu, ptr); \
107 break; \
108 case 2: \
109 __get_user_x(2, __ret_gu, __val_gu, ptr); \
110 break; \
111 case 4: \
112 __get_user_x(4, __ret_gu, __val_gu, ptr); \
113 break; \
114 case 8: \
115 __get_user_x(8, __ret_gu, __val_gu, ptr); \
116 break; \
117 default: \
118 __get_user_bad(); \
119 break; \
120 } \
121 (x) = (__force typeof(*(ptr)))__val_gu; \
122 __ret_gu; \
123})
124
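The sign cast mentioned above matters for signed types; a sketch with a hypothetical pointer:

        static int read_user_byte(signed char __user *p, signed char *out)
        {
                signed char c;

                /* __val_gu carries the byte as unsigned long (e.g. 0xff);
                 * the (__force typeof(*(ptr))) cast narrows it back, so
                 * c ends up as -1 rather than 255 */
                if (get_user(c, p))
                        return -EFAULT;
                *out = c;
                return 0;
        }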
125extern void __put_user_1(void);
126extern void __put_user_2(void);
127extern void __put_user_4(void);
128extern void __put_user_8(void);
129extern void __put_user_bad(void);
130
131#define __put_user_x(size, ret, x, ptr) \
132 asm volatile("call __put_user_" #size \
133 :"=a" (ret) \
134 :"c" (ptr),"d" (x) \
135 :"r8")
136
137#define put_user(x, ptr) \
138 __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
139
140#define __get_user(x, ptr) \
141 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
142#define __put_user(x, ptr) \
143 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
144
145#define __get_user_unaligned __get_user
146#define __put_user_unaligned __put_user
147
148#define __put_user_nocheck(x, ptr, size) \
149({ \
150 int __pu_err; \
151 __put_user_size((x), (ptr), (size), __pu_err); \
152 __pu_err; \
153})
154
155
156#define __put_user_check(x, ptr, size) \
157({ \
158 int __pu_err; \
159 typeof(*(ptr)) __user *__pu_addr = (ptr); \
160 switch (size) { \
161 case 1: \
162 __put_user_x(1, __pu_err, x, __pu_addr); \
163 break; \
164 case 2: \
165 __put_user_x(2, __pu_err, x, __pu_addr); \
166 break; \
167 case 4: \
168 __put_user_x(4, __pu_err, x, __pu_addr); \
169 break; \
170 case 8: \
171 __put_user_x(8, __pu_err, x, __pu_addr); \
172 break; \
173 default: \
174 __put_user_bad(); \
175 } \
176 __pu_err; \
177})
178
179#define __put_user_size(x, ptr, size, retval) \
180do { \
181 retval = 0; \
182 __chk_user_ptr(ptr); \
183 switch (size) { \
184 case 1: \
185 __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
186 break; \
187 case 2: \
188 __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
189 break; \
190 case 4: \
191 __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
192 break; \
193 case 8: \
194 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
195 break; \
196 default: \
197 __put_user_bad(); \
198 } \
199} while (0)
200
201/* FIXME: this hack is definitely wrong -AK */
202struct __large_struct { unsigned long buf[100]; };
203#define __m(x) (*(struct __large_struct __user *)(x))
204
205/*
206 * Tell gcc we read from memory instead of writing: this is because
207 * we do not write to any memory gcc knows about, so there are no
208 * aliasing issues.
209 */
210#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
211 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
212 "2:\n" \
213 ".section .fixup, \"ax\"\n" \
214 "3: mov %3,%0\n" \
215 " jmp 2b\n" \
216 ".previous\n" \
217 _ASM_EXTABLE(1b, 3b) \
218 : "=r"(err) \
219 : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
220
221
222#define __get_user_nocheck(x, ptr, size) \
223({ \
224 int __gu_err; \
225 unsigned long __gu_val; \
226 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
227 (x) = (__force typeof(*(ptr)))__gu_val; \
228 __gu_err; \
229})
230
231extern int __get_user_1(void);
232extern int __get_user_2(void);
233extern int __get_user_4(void);
234extern int __get_user_8(void);
235extern int __get_user_bad(void);
236
237#define __get_user_size(x, ptr, size, retval) \
238do { \
239 retval = 0; \
240 __chk_user_ptr(ptr); \
241 switch (size) { \
242 case 1: \
243 __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
244 break; \
245 case 2: \
246 __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
247 break; \
248 case 4: \
249 __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
250 break; \
251 case 8: \
252 __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
253 break; \
254 default: \
255 (x) = __get_user_bad(); \
256 } \
257} while (0)
258
259#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
260 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
261 "2:\n" \
262 ".section .fixup, \"ax\"\n" \
263 "3: mov %3,%0\n" \
264 " xor"itype" %"rtype"1,%"rtype"1\n" \
265 " jmp 2b\n" \
266 ".previous\n" \
267 _ASM_EXTABLE(1b, 3b) \
268 : "=r" (err), ltype (x) \
269 : "m" (__m(addr)), "i"(errno), "0"(err))
270
271/* 12/*
272 * Copy To/From Userspace 13 * Copy To/From Userspace
273 */ 14 */
@@ -437,7 +178,6 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
437 return copy_user_generic((__force void *)dst, src, size); 178 return copy_user_generic((__force void *)dst, src, size);
438} 179}
439 180
440#define ARCH_HAS_NOCACHE_UACCESS 1
441extern long __copy_user_nocache(void *dst, const void __user *src, 181extern long __copy_user_nocache(void *dst, const void __user *src,
442 unsigned size, int zerorest); 182 unsigned size, int zerorest);
443 183