author		Linus Torvalds <torvalds@linux-foundation.org>	2012-05-23 13:44:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-23 13:44:35 -0400
commit		269af9a1a08d368b46d72e74126564d04c354f7e (patch)
tree		f0f2a8dd54075edebbb728602822e2b7378588d0 /arch
parent		8ca038dc10eec80f280d9d483f1835ac2763a787 (diff)
parent		8b5ad472991796b2347464922c72de2ca5a028f3 (diff)
Merge branch 'x86-extable-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull exception table generation updates from Ingo Molnar:
 "The biggest change here is to allow the build-time sorting of the
  exception table, to speed up booting.  This is achieved by the
  architecture enabling BUILDTIME_EXTABLE_SORT.  This option is enabled
  for x86 and MIPS currently.

  On x86 a number of fixes and changes were needed to allow build-time
  sorting of the exception table, in particular a relocation invariant
  exception table format was needed.  This required the abstracting out
  of exception table protocol and the removal of 20 years of accumulated
  assumptions about the x86 exception table format.

  While at it, this tree also cleans up various other aspects of
  exception handling, such as early(er) exception handling for
  rdmsr_safe() et al.

  All in one, as the result of these changes the x86 exception code is
  now pretty nice and modern.  As an added bonus any regressions in this
  code will be early and violent crashes, so if you see any of those,
  you'll know whom to blame!"

Fix up trivial conflicts in arch/{mips,x86}/Kconfig files due to nearby
modifications of other core architecture options.

* 'x86-extable-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
  Revert "x86, extable: Disable presorted exception table for now"
  scripts/sortextable: Handle relative entries, and other cleanups
  x86, extable: Switch to relative exception table entries
  x86, extable: Disable presorted exception table for now
  x86, extable: Add _ASM_EXTABLE_EX() macro
  x86, extable: Remove open-coded exception table entries in arch/x86/ia32/ia32entry.S
  x86, extable: Remove open-coded exception table entries in arch/x86/include/asm/xsave.h
  x86, extable: Remove open-coded exception table entries in arch/x86/include/asm/kvm_host.h
  x86, extable: Remove the now-unused __ASM_EX_SEC macros
  x86, extable: Remove open-coded exception table entries in arch/x86/xen/xen-asm_32.S
  x86, extable: Remove open-coded exception table entries in arch/x86/um/checksum_32.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/usercopy_32.c
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/putuser.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/getuser.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/csum-copy_64.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/copy_user_nocache_64.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/copy_user_64.S
  x86, extable: Remove open-coded exception table entries in arch/x86/lib/checksum_32.S
  x86, extable: Remove open-coded exception table entries in arch/x86/kernel/test_rodata.c
  x86, extable: Remove open-coded exception table entries in arch/x86/kernel/entry_64.S
  ...
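For orientation, the relocation-invariant format stores each exception table field as a 32-bit offset from the field's own address rather than as an absolute pointer, which is what makes sorting at build time safe. A minimal C sketch; the two decode helpers mirror ex_insn_addr()/ex_fixup_addr() added to arch/x86/mm/extable.c below, everything else is illustrative:

    #include <stdint.h>

    /* Relative exception table entry: each field holds the distance
     * from the field's own address to the target, so the stored value
     * never changes when the kernel image is relocated. */
    struct exception_table_entry {
    	int32_t insn;	/* (faulting insn address) - (&entry->insn)  */
    	int32_t fixup;	/* (fixup landing address) - (&entry->fixup) */
    };

    static inline unsigned long ex_insn_addr(const struct exception_table_entry *x)
    {
    	return (unsigned long)&x->insn + x->insn;
    }

    static inline unsigned long ex_fixup_addr(const struct exception_table_entry *x)
    {
    	return (unsigned long)&x->fixup + x->fixup;
    }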
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/Kconfig	1
-rw-r--r--	arch/x86/Kconfig	1
-rw-r--r--	arch/x86/ia32/ia32entry.S	9
-rw-r--r--	arch/x86/include/asm/asm.h	38
-rw-r--r--	arch/x86/include/asm/kvm_host.h	5
-rw-r--r--	arch/x86/include/asm/msr.h	9
-rw-r--r--	arch/x86/include/asm/nops.h	4
-rw-r--r--	arch/x86/include/asm/paravirt.h	6
-rw-r--r--	arch/x86/include/asm/segment.h	4
-rw-r--r--	arch/x86/include/asm/uaccess.h	25
-rw-r--r--	arch/x86/include/asm/xsave.h	10
-rw-r--r--	arch/x86/kernel/entry_32.S	47
-rw-r--r--	arch/x86/kernel/entry_64.S	16
-rw-r--r--	arch/x86/kernel/head_32.S	223
-rw-r--r--	arch/x86/kernel/head_64.S	80
-rw-r--r--	arch/x86/kernel/test_rodata.c	10
-rw-r--r--	arch/x86/lib/checksum_32.S	9
-rw-r--r--	arch/x86/lib/copy_user_64.S	63
-rw-r--r--	arch/x86/lib/copy_user_nocache_64.S	50
-rw-r--r--	arch/x86/lib/csum-copy_64.S	16
-rw-r--r--	arch/x86/lib/getuser.S	9
-rw-r--r--	arch/x86/lib/putuser.S	12
-rw-r--r--	arch/x86/lib/usercopy_32.c	232
-rw-r--r--	arch/x86/mm/extable.c	142
-rw-r--r--	arch/x86/um/checksum_32.S	9
-rw-r--r--	arch/x86/xen/xen-asm_32.S	6
26 files changed, 581 insertions, 455 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f5e121213c22..85aad0321397 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -30,6 +30,7 @@ config MIPS
 	select HAVE_MEMBLOCK_NODE_MAP
 	select ARCH_DISCARD_MEMBLOCK
 	select GENERIC_SMP_IDLE_THREAD
+	select BUILDTIME_EXTABLE_SORT
 
 menu "Machine selection"
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7b383d8da7b9..21ea6d28d71f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -83,6 +83,7 @@ config X86
 	select DCACHE_WORD_ACCESS
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_ARCH_SECCOMP_FILTER
+	select BUILDTIME_EXTABLE_SORT
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index e3e734005e19..20e5f7ba0e6b 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -13,6 +13,7 @@
 #include <asm/thread_info.h>
 #include <asm/segment.h>
 #include <asm/irqflags.h>
+#include <asm/asm.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
 
@@ -146,9 +147,7 @@ ENTRY(ia32_sysenter_target)
 	/* no need to do an access_ok check here because rbp has been
 	   32bit zero extended */
 1:	movl	(%rbp),%ebp
-	.section __ex_table,"a"
-	.quad 1b,ia32_badarg
-	.previous
+	_ASM_EXTABLE(1b,ia32_badarg)
 	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
@@ -303,9 +302,7 @@ ENTRY(ia32_cstar_target)
 	   32bit zero extended */
 	/* hardware stack frame is complete now */
 1:	movl	(%r8),%r9d
-	.section __ex_table,"a"
-	.quad 1b,ia32_badarg
-	.previous
+	_ASM_EXTABLE(1b,ia32_badarg)
 	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 9412d6558c88..1c2d247f65ce 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -4,11 +4,9 @@
 #ifdef __ASSEMBLY__
 # define __ASM_FORM(x)	x
 # define __ASM_FORM_COMMA(x) x,
-# define __ASM_EX_SEC	.section __ex_table, "a"
 #else
 # define __ASM_FORM(x)	" " #x " "
 # define __ASM_FORM_COMMA(x) " " #x ","
-# define __ASM_EX_SEC	" .section __ex_table,\"a\"\n"
 #endif
 
 #ifdef CONFIG_X86_32
@@ -42,17 +40,33 @@
 
 /* Exception table entry */
 #ifdef __ASSEMBLY__
 # define _ASM_EXTABLE(from,to)			\
-	__ASM_EX_SEC ;				\
-	_ASM_ALIGN ;				\
-	_ASM_PTR from , to ;			\
-	.previous
+	.pushsection "__ex_table","a" ;		\
+	.balign 8 ;				\
+	.long (from) - . ;			\
+	.long (to) - . ;			\
+	.popsection
+
+# define _ASM_EXTABLE_EX(from,to)		\
+	.pushsection "__ex_table","a" ;		\
+	.balign 8 ;				\
+	.long (from) - . ;			\
+	.long (to) - . + 0x7ffffff0 ;		\
+	.popsection
 #else
 # define _ASM_EXTABLE(from,to)			\
-	__ASM_EX_SEC				\
-	_ASM_ALIGN "\n"				\
-	_ASM_PTR #from "," #to "\n"		\
-	" .previous\n"
+	" .pushsection \"__ex_table\",\"a\"\n"	\
+	" .balign 8\n"				\
+	" .long (" #from ") - .\n"		\
+	" .long (" #to ") - .\n"		\
+	" .popsection\n"
+
+# define _ASM_EXTABLE_EX(from,to)		\
+	" .pushsection \"__ex_table\",\"a\"\n"	\
+	" .balign 8\n"				\
+	" .long (" #from ") - .\n"		\
+	" .long (" #to ") - . + 0x7ffffff0\n"	\
+	" .popsection\n"
 #endif
 
 #endif /* _ASM_X86_ASM_H */
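To show how the rewritten macro is consumed from C, here is a hedged sketch of a __get_user-style accessor. _ASM_EXTABLE is the real macro defined above; the function name, error value and fixup scheme are illustrative only:

    #include <asm/asm.h>	/* _ASM_EXTABLE, as rewritten above */

    /* Illustrative only: read one int from a user pointer, diverting
     * to the local fixup at 3: if the load at 1: faults.  The macro
     * emits a pair of 32-bit self-relative offsets into __ex_table,
     * exactly like the open-coded entries this series removes. */
    static inline int demo_get_user_int(int *val, const int __user *uaddr)
    {
    	int err = 0;

    	asm volatile("1:	movl %2,%1\n"
    		     "2:\n"
    		     ".section .fixup,\"ax\"\n"
    		     "3:	movl $-14,%0\n"	/* -EFAULT */
    		     "	jmp 2b\n"
    		     ".previous\n"
    		     _ASM_EXTABLE(1b, 3b)
    		     : "+r" (err), "=r" (*val)
    		     : "m" (*uaddr));
    	return err;
    }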
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e216ba066e79..e5b97be12d2a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -27,6 +27,7 @@
 #include <asm/desc.h>
 #include <asm/mtrr.h>
 #include <asm/msr-index.h>
+#include <asm/asm.h>
 
 #define KVM_MAX_VCPUS 254
 #define KVM_SOFT_MAX_VCPUS 160
@@ -921,9 +922,7 @@ extern bool kvm_rebooting;
 	__ASM_SIZE(push) " $666b \n\t"		\
 	"call kvm_spurious_fault \n\t"		\
 	".popsection \n\t"			\
-	".pushsection __ex_table, \"a\" \n\t"	\
-	_ASM_PTR " 666b, 667b \n\t"		\
-	".popsection"
+	_ASM_EXTABLE(666b, 667b)
 
 #define __kvm_handle_fault_on_reboot(insn)	\
 	____kvm_handle_fault_on_reboot(insn, "")
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 95203d40ffdd..084ef95274cd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -169,14 +169,7 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 	return native_write_msr_safe(msr, low, high);
 }
 
-/*
- * rdmsr with exception handling.
- *
- * Please note that the exception handling works only after we've
- * switched to the "smart" #GP handler in trap_init() which knows about
- * exception tables - using this macro earlier than that causes machine
- * hangs on boxes which do not implement the @msr in the first argument.
- */
+/* rdmsr with exception handling */
 #define rdmsr_safe(msr, p1, p2)					\
 ({								\
 	int __err;						\
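The warning comment can go because early_fixup_exception() (added below) makes the exception table usable long before trap_init(). A hedged usage sketch; rdmsr_safe() is the real macro, the probing function around it is ours:

    #include <linux/errno.h>
    #include <asm/msr.h>

    /* Illustrative: probe an MSR that may not exist on this CPU.
     * With early exception handling in place, a #GP from the rdmsr is
     * caught via the exception table and reported as a nonzero return
     * value instead of hanging the machine. */
    static int demo_read_msr(unsigned int msr, u64 *out)
    {
    	u32 lo, hi;

    	if (rdmsr_safe(msr, &lo, &hi))
    		return -EIO;	/* CPU raised #GP: MSR not implemented */

    	*out = ((u64)hi << 32) | lo;
    	return 0;
    }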
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index 405b4032a60b..aff2b3356101 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -87,7 +87,11 @@
 #define P6_NOP8	0x0f,0x1f,0x84,0x00,0,0,0,0
 #define P6_NOP5_ATOMIC P6_NOP5
 
+#ifdef __ASSEMBLY__
+#define _ASM_MK_NOP(x) .byte x
+#else
 #define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
+#endif
 
 #if defined(CONFIG_MK7)
 #define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index aa0f91308367..6cbbabf52707 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1023,10 +1023,8 @@ extern void default_banner(void);
 	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
 	)
 
-#define GET_CR2_INTO_RCX				\
-	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
-	movq %rax, %rcx;				\
-	xorq %rax, %rax;
+#define GET_CR2_INTO_RAX				\
+	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
 
 #define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame),	\
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 165466233ab0..c48a95035a77 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -205,13 +205,15 @@
 
 #define IDT_ENTRIES 256
 #define NUM_EXCEPTION_VECTORS 32
+/* Bitmask of exception vectors which push an error code on the stack */
+#define EXCEPTION_ERRCODE_MASK  0x00027d00
 #define GDT_SIZE (GDT_ENTRIES * 8)
 #define GDT_ENTRY_TLS_ENTRIES 3
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
+extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
 
 /*
  * Load a segment. Fall back on loading the zero
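Two constants worth unpacking: 0x00027d00 has exactly bits 8, 10 to 14 and 17 set, i.e. the vectors that push a hardware error code (#DF, #TS, #NP, #SS, #GP, #PF, #AC), and 2+2+5 is the per-vector stub size used by the early IDT handlers in head_32.S/head_64.S below. A small self-contained check (standalone C, not kernel code):

    #include <stdio.h>

    #define EXCEPTION_ERRCODE_MASK 0x00027d00

    int main(void)
    {
    	/* Vectors whose bit is set push a hardware error code:
    	 * 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP),
    	 * 14 (#PF) and 17 (#AC). */
    	for (int vec = 0; vec < 32; vec++)
    		if ((EXCEPTION_ERRCODE_MASK >> vec) & 1)
    			printf("vector %d pushes an error code\n", vec);

    	/* Each early IDT stub is 2 bytes (2-byte NOP or "pushl $0"),
    	 * plus 2 bytes ("pushl $vector", imm8 since vector < 32),
    	 * plus 5 bytes (rel32 jmp): the [2+2+5] stride above. */
    	printf("stub size = %d bytes\n", 2 + 2 + 5);
    	return 0;
    }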
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index e0544597cfe7..851fe0dc13bc 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -79,11 +79,12 @@
 #define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
 
 /*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * The exception table consists of pairs of addresses relative to the
+ * exception table enty itself: the first is the address of an
+ * instruction that is allowed to fault, and the second is the address
+ * at which the program should continue.  No registers are modified,
+ * so it is entirely up to the continuation code to figure out what to
+ * do.
  *
  * All the routines below use bits of fixup code that are out of line
  * with the main instruction path.  This means when everything is well,
@@ -92,10 +93,14 @@
  */
 
 struct exception_table_entry {
-	unsigned long insn, fixup;
+	int insn, fixup;
 };
+/* This is not the generic standard exception_table_entry format */
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
 
 extern int fixup_exception(struct pt_regs *regs);
+extern int early_fixup_exception(unsigned long *ip);
 
 /*
  * These are the main single-value transfer routines.  They automatically
@@ -202,8 +207,8 @@ extern int __get_user_bad(void);
 	asm volatile("1:	movl %%eax,0(%1)\n"	\
 		     "2:	movl %%edx,4(%1)\n"	\
 		     "3:\n"				\
-		     _ASM_EXTABLE(1b, 2b - 1b)		\
-		     _ASM_EXTABLE(2b, 3b - 2b)		\
+		     _ASM_EXTABLE_EX(1b, 2b)		\
+		     _ASM_EXTABLE_EX(2b, 3b)		\
 		     : : "A" (x), "r" (addr))
 
 #define __put_user_x8(x, ptr, __ret_pu)			\
@@ -408,7 +413,7 @@ do { \
 #define __get_user_asm_ex(x, addr, itype, rtype, ltype)	\
 	asm volatile("1:	mov"itype" %1,%"rtype"0\n"	\
 		     "2:\n"					\
-		     _ASM_EXTABLE(1b, 2b - 1b)			\
+		     _ASM_EXTABLE_EX(1b, 2b)			\
 		     : ltype(x) : "m" (__m(addr)))
 
 #define __put_user_nocheck(x, ptr, size)			\
@@ -450,7 +455,7 @@ struct __large_struct { unsigned long buf[100]; };
 #define __put_user_asm_ex(x, addr, itype, rtype, ltype)	\
 	asm volatile("1:	mov"itype" %"rtype"0,%1\n"	\
 		     "2:\n"					\
-		     _ASM_EXTABLE(1b, 2b - 1b)			\
+		     _ASM_EXTABLE_EX(1b, 2b)			\
 		     : : ltype(x), "m" (__m(addr)))
 
 /*
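The _EX variants mark "extended" entries: their fixup offset carries a 0x7ffffff0 bias, which fixup_exception() (in arch/x86/mm/extable.c below) detects in order to set current_thread_info()->uaccess_err rather than jump through per-site fixup code. A minimal decode sketch under that assumption, with names of our own choosing:

    #include <stdint.h>
    #include <stdbool.h>

    struct exception_table_entry { int32_t insn, fixup; };

    /* Sketch of the fixup-side decode: an entry emitted by
     * _ASM_EXTABLE_EX has (fixup - insn) pushed up near 0x7ffffff0 by
     * the bias, which is how the handler tells "set uaccess_err and
     * resume" entries apart from ordinary fixup-jump entries. */
    static bool ex_is_uaccess_err(const struct exception_table_entry *e,
    			      unsigned long *new_ip)
    {
    	*new_ip = (unsigned long)&e->fixup + e->fixup;
    	if (e->fixup - e->insn >= 0x7ffffff0 - 4) {
    		*new_ip -= 0x7ffffff0;	/* strip the bias */
    		return true;
    	}
    	return false;
    }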
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index c6ce2452f10c..8a1b6f9b594a 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -80,10 +80,7 @@ static inline int xsave_user(struct xsave_struct __user *buf)
80 "3: movl $-1,%[err]\n" 80 "3: movl $-1,%[err]\n"
81 " jmp 2b\n" 81 " jmp 2b\n"
82 ".previous\n" 82 ".previous\n"
83 ".section __ex_table,\"a\"\n" 83 _ASM_EXTABLE(1b,3b)
84 _ASM_ALIGN "\n"
85 _ASM_PTR "1b,3b\n"
86 ".previous"
87 : [err] "=r" (err) 84 : [err] "=r" (err)
88 : "D" (buf), "a" (-1), "d" (-1), "0" (0) 85 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
89 : "memory"); 86 : "memory");
@@ -106,10 +103,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
106 "3: movl $-1,%[err]\n" 103 "3: movl $-1,%[err]\n"
107 " jmp 2b\n" 104 " jmp 2b\n"
108 ".previous\n" 105 ".previous\n"
109 ".section __ex_table,\"a\"\n" 106 _ASM_EXTABLE(1b,3b)
110 _ASM_ALIGN "\n"
111 _ASM_PTR "1b,3b\n"
112 ".previous"
113 : [err] "=r" (err) 107 : [err] "=r" (err)
114 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) 108 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
115 : "memory"); /* memory required? */ 109 : "memory"); /* memory required? */
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 7b784f4ef1e4..01ccf9b71473 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -56,6 +56,7 @@
 #include <asm/irq_vectors.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
+#include <asm/asm.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -151,10 +152,8 @@
 .pushsection .fixup, "ax"
 99:	movl $0, (%esp)
 	jmp 98b
-.section __ex_table, "a"
-	.align 4
-	.long 98b, 99b
 .popsection
+	_ASM_EXTABLE(98b,99b)
 .endm
 
 .macro PTGS_TO_GS
@@ -164,10 +163,8 @@
 .pushsection .fixup, "ax"
 99:	movl $0, PT_GS(%esp)
 	jmp 98b
-.section __ex_table, "a"
-	.align 4
-	.long 98b, 99b
 .popsection
+	_ASM_EXTABLE(98b,99b)
 .endm
 
 .macro GS_TO_REG reg
@@ -249,12 +246,10 @@
 	jmp 2b
 6:	movl $0, (%esp)
 	jmp 3b
-.section __ex_table, "a"
-	.align 4
-	.long 1b, 4b
-	.long 2b, 5b
-	.long 3b, 6b
 .popsection
+	_ASM_EXTABLE(1b,4b)
+	_ASM_EXTABLE(2b,5b)
+	_ASM_EXTABLE(3b,6b)
 	POP_GS_EX
 .endm
 
@@ -415,10 +410,7 @@ sysenter_past_esp:
 	jae syscall_fault
 1:	movl (%ebp),%ebp
 	movl %ebp,PT_EBP(%esp)
-.section __ex_table,"a"
-	.align 4
-	.long 1b,syscall_fault
-.previous
+	_ASM_EXTABLE(1b,syscall_fault)
 
 	GET_THREAD_INFO(%ebp)
 
@@ -485,10 +477,8 @@ sysexit_audit:
 .pushsection .fixup,"ax"
 2:	movl $0,PT_FS(%esp)
 	jmp 1b
-.section __ex_table,"a"
-	.align 4
-	.long 1b,2b
 .popsection
+	_ASM_EXTABLE(1b,2b)
 	PTGS_TO_GS_EX
 ENDPROC(ia32_sysenter_target)
 
@@ -543,10 +533,7 @@ ENTRY(iret_exc)
 	pushl $do_iret_error
 	jmp error_code
 .previous
-.section __ex_table,"a"
-	.align 4
-	.long irq_return,iret_exc
-.previous
+	_ASM_EXTABLE(irq_return,iret_exc)
 
 	CFI_RESTORE_STATE
 ldt_ss:
@@ -901,10 +888,7 @@ END(device_not_available)
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_iret)
 	iret
-.section __ex_table,"a"
-	.align 4
-	.long native_iret, iret_exc
-.previous
+	_ASM_EXTABLE(native_iret, iret_exc)
 END(native_iret)
 
 ENTRY(native_irq_enable_sysexit)
@@ -1093,13 +1077,10 @@ ENTRY(xen_failsafe_callback)
 	movl %eax,16(%esp)
 	jmp 4b
 .previous
-.section __ex_table,"a"
-	.align 4
-	.long 1b,6b
-	.long 2b,7b
-	.long 3b,8b
-	.long 4b,9b
-.previous
+	_ASM_EXTABLE(1b,6b)
+	_ASM_EXTABLE(2b,7b)
+	_ASM_EXTABLE(3b,8b)
+	_ASM_EXTABLE(4b,9b)
ENDPROC(xen_failsafe_callback)
 
 BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index cdc79b5cfcd9..320852d02026 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -55,6 +55,7 @@
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
 #include <asm/percpu.h>
+#include <asm/asm.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -900,18 +901,12 @@ restore_args:
 
 irq_return:
 	INTERRUPT_RETURN
-
-	.section __ex_table, "a"
-	.quad irq_return, bad_iret
-	.previous
+	_ASM_EXTABLE(irq_return, bad_iret)
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_iret)
 	iretq
-
-	.section __ex_table,"a"
-	.quad native_iret, bad_iret
-	.previous
+	_ASM_EXTABLE(native_iret, bad_iret)
 #endif
 
 	.section .fixup,"ax"
@@ -1181,10 +1176,7 @@ gs_change:
 	CFI_ENDPROC
 END(native_load_gs_index)
 
-	.section __ex_table,"a"
-	.align 8
-	.quad gs_change,bad_gs
-	.previous
+	_ASM_EXTABLE(gs_change,bad_gs)
 	.section .fixup,"ax"
 	/* running with kernelgs */
 bad_gs:
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index ce0be7cd085e..463c9797ca6a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -21,6 +21,7 @@
 #include <asm/msr-index.h>
 #include <asm/cpufeature.h>
 #include <asm/percpu.h>
+#include <asm/nops.h>
 
 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
@@ -363,28 +364,23 @@ default_entry:
 	pushl $0
 	popfl
 
-#ifdef CONFIG_SMP
-	cmpb $0, ready
-	jnz checkCPUtype
-#endif /* CONFIG_SMP */
-
 /*
  * start system 32-bit setup. We need to re-do some of the things done
  * in 16-bit mode for the "real" operations.
  */
-	call setup_idt
-
-checkCPUtype:
-
-	movl $-1,X86_CPUID		#  -1 for no CPUID initially
+	movl setup_once_ref,%eax
+	andl %eax,%eax
+	jz 1f				# Did we do this already?
+	call *%eax
+1:
 
 /* check if it is 486 or 386. */
 /*
  * XXX - this does a lot of unnecessary setup.  Alignment checks don't
  * apply at our cpl of 0 and the stack ought to be aligned already, and
  * we don't need to preserve eflags.
  */
-
+	movl $-1,X86_CPUID	# -1 for no CPUID initially
 	movb $3,X86		# at least 386
 	pushfl			# push EFLAGS
 	popl %eax		# get EFLAGS
@@ -450,21 +446,6 @@ is386: movl $2,%ecx # set MP
 	movl $(__KERNEL_PERCPU), %eax
 	movl %eax,%fs			# set this cpu's percpu
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-	/*
-	 * The linker can't handle this by relocation.  Manually set
-	 * base address in stack canary segment descriptor.
-	 */
-	cmpb $0,ready
-	jne 1f
-	movl $gdt_page,%eax
-	movl $stack_canary,%ecx
-	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
-	shrl $16, %ecx
-	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
-	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
-1:
-#endif
 	movl $(__KERNEL_STACK_CANARY),%eax
 	movl %eax,%gs
 
@@ -473,7 +454,6 @@ is386: movl $2,%ecx # set MP
 
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
-	movb $1, ready
 	jmp *(initial_code)
 
 /*
@@ -495,81 +475,122 @@ check_x87:
 	.byte 0xDB,0xE4		/* fsetpm for 287, ignored by 387 */
 	ret
 
+
+#include "verify_cpu.S"
+
 /*
- * setup_idt
+ * setup_once
  *
- *  sets up a idt with 256 entries pointing to
- *  ignore_int, interrupt gates. It doesn't actually load
- *  idt - that can be done only after paging has been enabled
- *  and the kernel moved to PAGE_OFFSET. Interrupts
- *  are enabled elsewhere, when we can be relatively
- *  sure everything is ok.
+ * The setup work we only want to run on the BSP.
  *
  * Warning: %esi is live across this function.
  */
-setup_idt:
-	lea ignore_int,%edx
-	movl $(__KERNEL_CS << 16),%eax
-	movw %dx,%ax		/* selector = 0x0010 = cs */
-	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
+__INIT
+setup_once:
+	/*
+	 * Set up a idt with 256 entries pointing to ignore_int,
+	 * interrupt gates. It doesn't actually load idt - that needs
+	 * to be done on each CPU. Interrupts are enabled elsewhere,
+	 * when we can be relatively sure everything is ok.
+	 */
 
-	lea idt_table,%edi
-	mov $256,%ecx
-rp_sidt:
+	movl $idt_table,%edi
+	movl $early_idt_handlers,%eax
+	movl $NUM_EXCEPTION_VECTORS,%ecx
+1:
 	movl %eax,(%edi)
-	movl %edx,4(%edi)
+	movl %eax,4(%edi)
+	/* interrupt gate, dpl=0, present */
+	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
+	addl $9,%eax
 	addl $8,%edi
-	dec %ecx
-	jne rp_sidt
+	loop 1b
 
-.macro	set_early_handler handler,trapno
-	lea \handler,%edx
-	movl $(__KERNEL_CS << 16),%eax
-	movw %dx,%ax
-	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
-	lea idt_table,%edi
-	movl %eax,8*\trapno(%edi)
-	movl %edx,8*\trapno+4(%edi)
-.endm
+	movl $256 - NUM_EXCEPTION_VECTORS,%ecx
+	movl $ignore_int,%edx
+	movl $(__KERNEL_CS << 16),%eax
+	movw %dx,%ax		/* selector = 0x0010 = cs */
+	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
+2:
+	movl %eax,(%edi)
+	movl %edx,4(%edi)
+	addl $8,%edi
+	loop 2b
 
-	set_early_handler handler=early_divide_err,trapno=0
-	set_early_handler handler=early_illegal_opcode,trapno=6
-	set_early_handler handler=early_protection_fault,trapno=13
-	set_early_handler handler=early_page_fault,trapno=14
+#ifdef CONFIG_CC_STACKPROTECTOR
+	/*
+	 * Configure the stack canary. The linker can't handle this by
+	 * relocation.  Manually set base address in stack canary
+	 * segment descriptor.
+	 */
+	movl $gdt_page,%eax
+	movl $stack_canary,%ecx
+	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+	shrl $16, %ecx
+	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
+#endif
 
+	andl $0,setup_once_ref	/* Once is enough, thanks */
 	ret
 
-early_divide_err:
-	xor %edx,%edx
-	pushl $0		/* fake errcode */
-	jmp early_fault
+ENTRY(early_idt_handlers)
+	# 36(%esp) %eflags
+	# 32(%esp) %cs
+	# 28(%esp) %eip
+	# 24(%rsp) error code
+	i = 0
+	.rept NUM_EXCEPTION_VECTORS
+	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
+	ASM_NOP2
+	.else
+	pushl $0		# Dummy error code, to make stack frame uniform
+	.endif
+	pushl $i		# 20(%esp) Vector number
+	jmp early_idt_handler
+	i = i + 1
+	.endr
+ENDPROC(early_idt_handlers)
+
+	/* This is global to keep gas from relaxing the jumps */
+ENTRY(early_idt_handler)
+	cld
+	cmpl $2,%ss:early_recursion_flag
+	je hlt_loop
+	incl %ss:early_recursion_flag
 
-early_illegal_opcode:
-	movl $6,%edx
-	pushl $0		/* fake errcode */
-	jmp early_fault
+	push %eax		# 16(%esp)
+	push %ecx		# 12(%esp)
+	push %edx		#  8(%esp)
+	push %ds		#  4(%esp)
+	push %es		#  0(%esp)
+	movl $(__KERNEL_DS),%eax
+	movl %eax,%ds
+	movl %eax,%es
 
-early_protection_fault:
-	movl $13,%edx
-	jmp early_fault
+	cmpl $(__KERNEL_CS),32(%esp)
+	jne 10f
 
-early_page_fault:
-	movl $14,%edx
-	jmp early_fault
+	leal 28(%esp),%eax	# Pointer to %eip
+	call early_fixup_exception
+	andl %eax,%eax
+	jnz ex_entry		/* found an exception entry */
 
-early_fault:
-	cld
+10:
 #ifdef CONFIG_PRINTK
-	pusha
-	movl $(__KERNEL_DS),%eax
-	movl %eax,%ds
-	movl %eax,%es
-	cmpl $2,early_recursion_flag
-	je hlt_loop
-	incl early_recursion_flag
+	xorl %eax,%eax
+	movw %ax,2(%esp)	/* clean up the segment values on some cpus */
+	movw %ax,6(%esp)
+	movw %ax,34(%esp)
+	leal  40(%esp),%eax
+	pushl %eax		/* %esp before the exception */
+	pushl %ebx
+	pushl %ebp
+	pushl %esi
+	pushl %edi
 	movl %cr2,%eax
 	pushl %eax
-	pushl %edx		/* trapno */
+	pushl (20+6*4)(%esp)	/* trapno */
 	pushl $fault_msg
 	call printk
 #endif
@@ -578,6 +599,17 @@ hlt_loop:
 	hlt
 	jmp hlt_loop
 
+ex_entry:
+	pop %es
+	pop %ds
+	pop %edx
+	pop %ecx
+	pop %eax
+	addl $8,%esp		/* drop vector number and error code */
+	decl %ss:early_recursion_flag
+	iret
+ENDPROC(early_idt_handler)
+
 /* This is the default interrupt "handler" :-) */
 	ALIGN
 ignore_int:
@@ -611,13 +643,18 @@ ignore_int:
 	popl %eax
 #endif
 	iret
+ENDPROC(ignore_int)
+__INITDATA
+	.align 4
+early_recursion_flag:
+	.long 0
 
-#include "verify_cpu.S"
-
-	__REFDATA
-.align 4
+__REFDATA
+	.align 4
 ENTRY(initial_code)
 	.long i386_start_kernel
+ENTRY(setup_once_ref)
+	.long setup_once
 
 /*
  * BSS section
@@ -670,22 +707,19 @@ ENTRY(initial_page_table)
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
 
-early_recursion_flag:
-	.long 0
-
-ready:	.byte 0
-
+__INITRODATA
 int_msg:
 	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
 fault_msg:
 /* fault info: */
 	.ascii "BUG: Int %d: CR2 %p\n"
-/* pusha regs: */
-	.ascii "     EDI %p  ESI %p  EBP %p  ESP %p\n"
-	.ascii "     EBX %p  EDX %p  ECX %p  EAX %p\n"
+/* regs pushed in early_idt_handler: */
+	.ascii "     EDI %p  ESI %p  EBP %p  EBX %p\n"
+	.ascii "     ESP %p   ES %p   DS %p\n"
+	.ascii "     EDX %p  ECX %p  EAX %p\n"
 /* fault frame: */
-	.ascii "     err %p  EIP %p   CS %p  flg %p\n"
+	.ascii " vec %p  err %p  EIP %p   CS %p  flg %p\n"
 	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
 	.ascii "       %p %p %p %p %p %p %p %p\n"
 	.asciz "       %p %p %p %p %p %p %p %p\n"
@@ -699,6 +733,7 @@ fault_msg:
  * segment size, and 32-bit linear address value:
  */
 
+	.data
 .globl boot_gdt_descr
 .globl idt_descr
 
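The setup_once_ref indirection above replaces the old "ready" byte: the boot CPU calls through a pointer that setup_once zeroes on its way out, so later CPUs see zero and skip the one-time work. The same pattern in C, purely as illustration (boot-time ordering stands in for locking, as in the assembly):

    /* Illustrative C rendering of the setup_once_ref pattern: a
     * function pointer that the one-time setup routine clears, so any
     * CPU arriving later falls through without re-running it. */
    static void setup_once(void);
    static void (*setup_once_ref)(void) = setup_once;

    static void setup_once(void)
    {
    	/* ... BSP-only work: build the early IDT, configure the
    	 * stack canary descriptor ... */
    	setup_once_ref = 0;	/* Once is enough, thanks */
    }

    static void per_cpu_entry(void)
    {
    	if (setup_once_ref)	/* non-zero only for the first CPU */
    		setup_once_ref();
    	/* ... common startup continues ... */
    }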
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 40f4eb3766d1..7a40f2447321 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,12 +19,15 @@
 #include <asm/cache.h>
 #include <asm/processor-flags.h>
 #include <asm/percpu.h>
+#include <asm/nops.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
 #include <asm/paravirt.h>
+#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
 #else
-#define GET_CR2_INTO_RCX movq %cr2, %rcx
+#define GET_CR2_INTO(reg) movq %cr2, reg
+#define INTERRUPT_RETURN iretq
 #endif
 
 /* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
@@ -270,36 +273,56 @@ bad_address:
 	jmp bad_address
 
 	.section ".init.text","ax"
-#ifdef CONFIG_EARLY_PRINTK
 	.globl early_idt_handlers
 early_idt_handlers:
+	# 104(%rsp) %rflags
+	#  96(%rsp) %cs
+	#  88(%rsp) %rip
+	#  80(%rsp) error code
 	i = 0
 	.rept NUM_EXCEPTION_VECTORS
-	movl $i, %esi
+	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
+	ASM_NOP2
+	.else
+	pushq $0		# Dummy error code, to make stack frame uniform
+	.endif
+	pushq $i		# 72(%rsp) Vector number
 	jmp early_idt_handler
 	i = i + 1
 	.endr
-#endif
 
 ENTRY(early_idt_handler)
-#ifdef CONFIG_EARLY_PRINTK
+	cld
+
 	cmpl $2,early_recursion_flag(%rip)
 	jz  1f
 	incl early_recursion_flag(%rip)
-	GET_CR2_INTO_RCX
-	movq %rcx,%r9
-	xorl %r8d,%r8d		# zero for error code
-	movl %esi,%ecx		# get vector number
-	# Test %ecx against mask of vectors that push error code.
-	cmpl $31,%ecx
-	ja 0f
-	movl $1,%eax
-	salq %cl,%rax
-	testl $0x27d00,%eax
-	je 0f
-	popq %r8		# get error code
-0:	movq 0(%rsp),%rcx	# get ip
-	movq 8(%rsp),%rdx	# get cs
+
+	pushq %rax		# 64(%rsp)
+	pushq %rcx		# 56(%rsp)
+	pushq %rdx		# 48(%rsp)
+	pushq %rsi		# 40(%rsp)
+	pushq %rdi		# 32(%rsp)
+	pushq %r8		# 24(%rsp)
+	pushq %r9		# 16(%rsp)
+	pushq %r10		#  8(%rsp)
+	pushq %r11		#  0(%rsp)
+
+	cmpl $__KERNEL_CS,96(%rsp)
+	jne 10f
+
+	leaq 88(%rsp),%rdi	# Pointer to %rip
+	call early_fixup_exception
+	andl %eax,%eax
+	jnz 20f			# Found an exception entry
+
+10:
+#ifdef CONFIG_EARLY_PRINTK
+	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
+	movl 80(%rsp),%r8d	# error code
+	movl 72(%rsp),%esi	# vector number
+	movl 96(%rsp),%edx	# %cs
+	movq 88(%rsp),%rcx	# %rip
 	xorl %eax,%eax
 	leaq early_idt_msg(%rip),%rdi
 	call early_printk
@@ -308,17 +331,32 @@ ENTRY(early_idt_handler)
 	call dump_stack
 #ifdef CONFIG_KALLSYMS
 	leaq early_idt_ripmsg(%rip),%rdi
-	movq 0(%rsp),%rsi	# get rip again
+	movq 40(%rsp),%rsi	# %rip again
 	call __print_symbol
 #endif
 #endif /* EARLY_PRINTK */
 1:	hlt
 	jmp 1b
 
-#ifdef CONFIG_EARLY_PRINTK
+20:	# Exception table entry found
+	popq %r11
+	popq %r10
+	popq %r9
+	popq %r8
+	popq %rdi
+	popq %rsi
+	popq %rdx
+	popq %rcx
+	popq %rax
+	addq $16,%rsp		# drop vector number and error code
+	decl early_recursion_flag(%rip)
+	INTERRUPT_RETURN
+
+	.balign 4
 early_recursion_flag:
 	.long 0
 
+#ifdef CONFIG_EARLY_PRINTK
 early_idt_msg:
 	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
 early_idt_ripmsg:
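The annotated %rsp offsets describe one uniform frame: nine pushed scratch registers below the vector and error code, then the hardware exception frame. Rendered as a C struct for orientation (illustrative; the names are ours, not the kernel's):

    #include <stdint.h>

    /* Illustrative layout of the stack as early_idt_handler sees it on
     * x86-64 after its nine register pushes; offsets match the
     * #-comments above (0(%rsp) == r11 ... 104(%rsp) == rflags). */
    struct early_exception_frame {
    	uint64_t r11, r10, r9, r8, rdi, rsi, rdx, rcx, rax;
    	uint64_t vector;	/*  72(%rsp), pushed by the stub */
    	uint64_t error_code;	/*  80(%rsp), from the CPU or dummy 0 */
    	uint64_t rip;		/*  88(%rsp), hardware frame from here up */
    	uint64_t cs;		/*  96(%rsp) */
    	uint64_t rflags;	/* 104(%rsp) */
    };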
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index c29e235792af..b79133abda48 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
+#include <asm/asm.h>
 
 int rodata_test(void)
 {
@@ -42,14 +43,7 @@ int rodata_test(void)
42 ".section .fixup,\"ax\"\n" 43 ".section .fixup,\"ax\"\n"
43 "2: jmp 1b\n" 44 "2: jmp 1b\n"
44 ".previous\n" 45 ".previous\n"
45 ".section __ex_table,\"a\"\n" 46 _ASM_EXTABLE(0b,2b)
46 " .align 16\n"
47#ifdef CONFIG_X86_32
48 " .long 0b,2b\n"
49#else
50 " .quad 0b,2b\n"
51#endif
52 ".previous"
53 : [rslt] "=r" (result) 47 : [rslt] "=r" (result)
54 : [rodata_test] "r" (&rodata_test_data), [zero] "r" (0UL) 48 : [rodata_test] "r" (&rodata_test_data), [zero] "r" (0UL)
55 ); 49 );
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 78d16a554db0..2af5df3ade7c 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -28,6 +28,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/errno.h>
+#include <asm/asm.h>
 
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -282,15 +283,11 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 
 #define SRC(y...)			\
 	9999: y;			\
-	.section __ex_table, "a";	\
-	.long 9999b, 6001f	;	\
-	.previous
+	_ASM_EXTABLE(9999b, 6001f)
 
 #define DST(y...)			\
 	9999: y;			\
-	.section __ex_table, "a";	\
-	.long 9999b, 6002f	;	\
-	.previous
+	_ASM_EXTABLE(9999b, 6002f)
 
 #ifndef CONFIG_X86_USE_PPRO_CHECKSUM
 
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 024840266ba0..5b2995f4557a 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -16,6 +16,7 @@
 #include <asm/thread_info.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
+#include <asm/asm.h>
 
 /*
  * By placing feature2 after feature1 in altinstructions section, we logically
@@ -63,11 +64,8 @@
 	jmp copy_user_handle_tail
 	.previous
 
-	.section __ex_table,"a"
-	.align 8
-	.quad 100b,103b
-	.quad 101b,103b
-	.previous
+	_ASM_EXTABLE(100b,103b)
+	_ASM_EXTABLE(101b,103b)
 #endif
 	.endm
 
@@ -191,29 +189,26 @@ ENTRY(copy_user_generic_unrolled)
 60:	jmp copy_user_handle_tail /* ecx is zerorest also */
 	.previous
 
-	.section __ex_table,"a"
-	.align 8
-	.quad 1b,30b
-	.quad 2b,30b
-	.quad 3b,30b
-	.quad 4b,30b
-	.quad 5b,30b
-	.quad 6b,30b
-	.quad 7b,30b
-	.quad 8b,30b
-	.quad 9b,30b
-	.quad 10b,30b
-	.quad 11b,30b
-	.quad 12b,30b
-	.quad 13b,30b
-	.quad 14b,30b
-	.quad 15b,30b
-	.quad 16b,30b
-	.quad 18b,40b
-	.quad 19b,40b
-	.quad 21b,50b
-	.quad 22b,50b
-	.previous
+	_ASM_EXTABLE(1b,30b)
+	_ASM_EXTABLE(2b,30b)
+	_ASM_EXTABLE(3b,30b)
+	_ASM_EXTABLE(4b,30b)
+	_ASM_EXTABLE(5b,30b)
+	_ASM_EXTABLE(6b,30b)
+	_ASM_EXTABLE(7b,30b)
+	_ASM_EXTABLE(8b,30b)
+	_ASM_EXTABLE(9b,30b)
+	_ASM_EXTABLE(10b,30b)
+	_ASM_EXTABLE(11b,30b)
+	_ASM_EXTABLE(12b,30b)
+	_ASM_EXTABLE(13b,30b)
+	_ASM_EXTABLE(14b,30b)
+	_ASM_EXTABLE(15b,30b)
+	_ASM_EXTABLE(16b,30b)
+	_ASM_EXTABLE(18b,40b)
+	_ASM_EXTABLE(19b,40b)
+	_ASM_EXTABLE(21b,50b)
+	_ASM_EXTABLE(22b,50b)
 	CFI_ENDPROC
 ENDPROC(copy_user_generic_unrolled)
 
@@ -259,11 +254,8 @@ ENTRY(copy_user_generic_string)
 	jmp copy_user_handle_tail
 	.previous
 
-	.section __ex_table,"a"
-	.align 8
-	.quad 1b,11b
-	.quad 3b,12b
-	.previous
+	_ASM_EXTABLE(1b,11b)
+	_ASM_EXTABLE(3b,12b)
 	CFI_ENDPROC
 ENDPROC(copy_user_generic_string)
 
@@ -294,9 +286,6 @@ ENTRY(copy_user_enhanced_fast_string)
 	jmp copy_user_handle_tail
 	.previous
 
-	.section __ex_table,"a"
-	.align 8
-	.quad 1b,12b
-	.previous
+	_ASM_EXTABLE(1b,12b)
 	CFI_ENDPROC
 ENDPROC(copy_user_enhanced_fast_string)
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
index cb0c112386fb..cacddc7163eb 100644
--- a/arch/x86/lib/copy_user_nocache_64.S
+++ b/arch/x86/lib/copy_user_nocache_64.S
@@ -14,6 +14,7 @@
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
+#include <asm/asm.h>
 
 	.macro ALIGN_DESTINATION
 #ifdef FIX_ALIGNMENT
@@ -36,11 +37,8 @@
 	jmp copy_user_handle_tail
 	.previous
 
-	.section __ex_table,"a"
-	.align 8
-	.quad 100b,103b
-	.quad 101b,103b
-	.previous
+	_ASM_EXTABLE(100b,103b)
+	_ASM_EXTABLE(101b,103b)
 #endif
 	.endm
 
@@ -111,27 +109,25 @@ ENTRY(__copy_user_nocache)
 	jmp copy_user_handle_tail
 	.previous
 
-	.section __ex_table,"a"
-	.quad 1b,30b
-	.quad 2b,30b
-	.quad 3b,30b
-	.quad 4b,30b
-	.quad 5b,30b
-	.quad 6b,30b
-	.quad 7b,30b
-	.quad 8b,30b
-	.quad 9b,30b
-	.quad 10b,30b
-	.quad 11b,30b
-	.quad 12b,30b
-	.quad 13b,30b
-	.quad 14b,30b
-	.quad 15b,30b
-	.quad 16b,30b
-	.quad 18b,40b
-	.quad 19b,40b
-	.quad 21b,50b
-	.quad 22b,50b
-	.previous
+	_ASM_EXTABLE(1b,30b)
+	_ASM_EXTABLE(2b,30b)
+	_ASM_EXTABLE(3b,30b)
+	_ASM_EXTABLE(4b,30b)
+	_ASM_EXTABLE(5b,30b)
+	_ASM_EXTABLE(6b,30b)
+	_ASM_EXTABLE(7b,30b)
+	_ASM_EXTABLE(8b,30b)
+	_ASM_EXTABLE(9b,30b)
+	_ASM_EXTABLE(10b,30b)
+	_ASM_EXTABLE(11b,30b)
+	_ASM_EXTABLE(12b,30b)
+	_ASM_EXTABLE(13b,30b)
+	_ASM_EXTABLE(14b,30b)
+	_ASM_EXTABLE(15b,30b)
+	_ASM_EXTABLE(16b,30b)
+	_ASM_EXTABLE(18b,40b)
+	_ASM_EXTABLE(19b,40b)
+	_ASM_EXTABLE(21b,50b)
+	_ASM_EXTABLE(22b,50b)
 	CFI_ENDPROC
 ENDPROC(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index fb903b758da8..2419d5fefae3 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/errno.h>
+#include <asm/asm.h>
 
 /*
  * Checksum copy with exception handling.
@@ -31,26 +32,17 @@
 
 	.macro source
 10:
-	.section __ex_table, "a"
-	.align 8
-	.quad 10b, .Lbad_source
-	.previous
+	_ASM_EXTABLE(10b, .Lbad_source)
 	.endm
 
 	.macro dest
 20:
-	.section __ex_table, "a"
-	.align 8
-	.quad 20b, .Lbad_dest
-	.previous
+	_ASM_EXTABLE(20b, .Lbad_dest)
 	.endm
 
 	.macro ignore L=.Lignore
 30:
-	.section __ex_table, "a"
-	.align 8
-	.quad 30b, \L
-	.previous
+	_ASM_EXTABLE(30b, \L)
 	.endm
 
 
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 51f1504cddd9..b33b1fb1e6d4 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -95,10 +95,9 @@ bad_get_user:
 	CFI_ENDPROC
 END(bad_get_user)
 
-.section __ex_table,"a"
-	_ASM_PTR 1b,bad_get_user
-	_ASM_PTR 2b,bad_get_user
-	_ASM_PTR 3b,bad_get_user
+	_ASM_EXTABLE(1b,bad_get_user)
+	_ASM_EXTABLE(2b,bad_get_user)
+	_ASM_EXTABLE(3b,bad_get_user)
 #ifdef CONFIG_X86_64
-	_ASM_PTR 4b,bad_get_user
+	_ASM_EXTABLE(4b,bad_get_user)
 #endif
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 36b0d15ae6e9..7f951c8f76c4 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -86,12 +86,10 @@ bad_put_user:
 	EXIT
 END(bad_put_user)
 
-.section __ex_table,"a"
-	_ASM_PTR 1b,bad_put_user
-	_ASM_PTR 2b,bad_put_user
-	_ASM_PTR 3b,bad_put_user
-	_ASM_PTR 4b,bad_put_user
+	_ASM_EXTABLE(1b,bad_put_user)
+	_ASM_EXTABLE(2b,bad_put_user)
+	_ASM_EXTABLE(3b,bad_put_user)
+	_ASM_EXTABLE(4b,bad_put_user)
 #ifdef CONFIG_X86_32
-	_ASM_PTR 5b,bad_put_user
+	_ASM_EXTABLE(5b,bad_put_user)
 #endif
-.previous
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index ef2a6a5d78e3..883b216c60b2 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -13,6 +13,7 @@
 #include <linux/interrupt.h>
 #include <asm/uaccess.h>
 #include <asm/mmx.h>
+#include <asm/asm.h>
 
 #ifdef CONFIG_X86_INTEL_USERCOPY
 /*
@@ -127,10 +128,7 @@ long strnlen_user(const char __user *s, long n)
127 "3: movb $1,%%al\n" 128 "3: movb $1,%%al\n"
128 " jmp 1b\n" 129 " jmp 1b\n"
129 ".previous\n" 130 ".previous\n"
130 ".section __ex_table,\"a\"\n" 131 _ASM_EXTABLE(0b,2b)
131 " .align 4\n"
132 " .long 0b,2b\n"
133 ".previous"
134 :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp) 132 :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
135 :"0" (n), "1" (s), "2" (0), "3" (mask) 133 :"0" (n), "1" (s), "2" (0), "3" (mask)
136 :"cc"); 134 :"cc");
@@ -199,47 +197,44 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
199 "101: lea 0(%%eax,%0,4),%0\n" 197 "101: lea 0(%%eax,%0,4),%0\n"
200 " jmp 100b\n" 198 " jmp 100b\n"
201 ".previous\n" 199 ".previous\n"
202 ".section __ex_table,\"a\"\n" 200 _ASM_EXTABLE(1b,100b)
203 " .align 4\n" 201 _ASM_EXTABLE(2b,100b)
204 " .long 1b,100b\n" 202 _ASM_EXTABLE(3b,100b)
205 " .long 2b,100b\n" 203 _ASM_EXTABLE(4b,100b)
206 " .long 3b,100b\n" 204 _ASM_EXTABLE(5b,100b)
207 " .long 4b,100b\n" 205 _ASM_EXTABLE(6b,100b)
208 " .long 5b,100b\n" 206 _ASM_EXTABLE(7b,100b)
209 " .long 6b,100b\n" 207 _ASM_EXTABLE(8b,100b)
210 " .long 7b,100b\n" 208 _ASM_EXTABLE(9b,100b)
211 " .long 8b,100b\n" 209 _ASM_EXTABLE(10b,100b)
212 " .long 9b,100b\n" 210 _ASM_EXTABLE(11b,100b)
213 " .long 10b,100b\n" 211 _ASM_EXTABLE(12b,100b)
214 " .long 11b,100b\n" 212 _ASM_EXTABLE(13b,100b)
215 " .long 12b,100b\n" 213 _ASM_EXTABLE(14b,100b)
216 " .long 13b,100b\n" 214 _ASM_EXTABLE(15b,100b)
217 " .long 14b,100b\n" 215 _ASM_EXTABLE(16b,100b)
218 " .long 15b,100b\n" 216 _ASM_EXTABLE(17b,100b)
219 " .long 16b,100b\n" 217 _ASM_EXTABLE(18b,100b)
220 " .long 17b,100b\n" 218 _ASM_EXTABLE(19b,100b)
221 " .long 18b,100b\n" 219 _ASM_EXTABLE(20b,100b)
222 " .long 19b,100b\n" 220 _ASM_EXTABLE(21b,100b)
223 " .long 20b,100b\n" 221 _ASM_EXTABLE(22b,100b)
224 " .long 21b,100b\n" 222 _ASM_EXTABLE(23b,100b)
225 " .long 22b,100b\n" 223 _ASM_EXTABLE(24b,100b)
226 " .long 23b,100b\n" 224 _ASM_EXTABLE(25b,100b)
227 " .long 24b,100b\n" 225 _ASM_EXTABLE(26b,100b)
228 " .long 25b,100b\n" 226 _ASM_EXTABLE(27b,100b)
229 " .long 26b,100b\n" 227 _ASM_EXTABLE(28b,100b)
230 " .long 27b,100b\n" 228 _ASM_EXTABLE(29b,100b)
231 " .long 28b,100b\n" 229 _ASM_EXTABLE(30b,100b)
232 " .long 29b,100b\n" 230 _ASM_EXTABLE(31b,100b)
233 " .long 30b,100b\n" 231 _ASM_EXTABLE(32b,100b)
234 " .long 31b,100b\n" 232 _ASM_EXTABLE(33b,100b)
235 " .long 32b,100b\n" 233 _ASM_EXTABLE(34b,100b)
236 " .long 33b,100b\n" 234 _ASM_EXTABLE(35b,100b)
237 " .long 34b,100b\n" 235 _ASM_EXTABLE(36b,100b)
238 " .long 35b,100b\n" 236 _ASM_EXTABLE(37b,100b)
239 " .long 36b,100b\n" 237 _ASM_EXTABLE(99b,101b)
240 " .long 37b,100b\n"
241 " .long 99b,101b\n"
242 ".previous"
243 : "=&c"(size), "=&D" (d0), "=&S" (d1) 238 : "=&c"(size), "=&D" (d0), "=&S" (d1)
244 : "1"(to), "2"(from), "0"(size) 239 : "1"(to), "2"(from), "0"(size)
245 : "eax", "edx", "memory"); 240 : "eax", "edx", "memory");
@@ -312,29 +307,26 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
312 " popl %0\n" 307 " popl %0\n"
313 " jmp 8b\n" 308 " jmp 8b\n"
314 ".previous\n" 309 ".previous\n"
315 ".section __ex_table,\"a\"\n" 310 _ASM_EXTABLE(0b,16b)
316 " .align 4\n" 311 _ASM_EXTABLE(1b,16b)
317 " .long 0b,16b\n" 312 _ASM_EXTABLE(2b,16b)
318 " .long 1b,16b\n" 313 _ASM_EXTABLE(21b,16b)
319 " .long 2b,16b\n" 314 _ASM_EXTABLE(3b,16b)
320 " .long 21b,16b\n" 315 _ASM_EXTABLE(31b,16b)
321 " .long 3b,16b\n" 316 _ASM_EXTABLE(4b,16b)
322 " .long 31b,16b\n" 317 _ASM_EXTABLE(41b,16b)
323 " .long 4b,16b\n" 318 _ASM_EXTABLE(10b,16b)
324 " .long 41b,16b\n" 319 _ASM_EXTABLE(51b,16b)
325 " .long 10b,16b\n" 320 _ASM_EXTABLE(11b,16b)
326 " .long 51b,16b\n" 321 _ASM_EXTABLE(61b,16b)
327 " .long 11b,16b\n" 322 _ASM_EXTABLE(12b,16b)
328 " .long 61b,16b\n" 323 _ASM_EXTABLE(71b,16b)
329 " .long 12b,16b\n" 324 _ASM_EXTABLE(13b,16b)
330 " .long 71b,16b\n" 325 _ASM_EXTABLE(81b,16b)
331 " .long 13b,16b\n" 326 _ASM_EXTABLE(14b,16b)
332 " .long 81b,16b\n" 327 _ASM_EXTABLE(91b,16b)
333 " .long 14b,16b\n" 328 _ASM_EXTABLE(6b,9b)
334 " .long 91b,16b\n" 329 _ASM_EXTABLE(7b,16b)
335 " .long 6b,9b\n"
336 " .long 7b,16b\n"
337 ".previous"
338 : "=&c"(size), "=&D" (d0), "=&S" (d1) 330 : "=&c"(size), "=&D" (d0), "=&S" (d1)
339 : "1"(to), "2"(from), "0"(size) 331 : "1"(to), "2"(from), "0"(size)
340 : "eax", "edx", "memory"); 332 : "eax", "edx", "memory");
@@ -414,29 +406,26 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
414 " popl %0\n" 406 " popl %0\n"
415 " jmp 8b\n" 407 " jmp 8b\n"
416 ".previous\n" 408 ".previous\n"
417 ".section __ex_table,\"a\"\n" 409 _ASM_EXTABLE(0b,16b)
418 " .align 4\n" 410 _ASM_EXTABLE(1b,16b)
419 " .long 0b,16b\n" 411 _ASM_EXTABLE(2b,16b)
420 " .long 1b,16b\n" 412 _ASM_EXTABLE(21b,16b)
421 " .long 2b,16b\n" 413 _ASM_EXTABLE(3b,16b)
422 " .long 21b,16b\n" 414 _ASM_EXTABLE(31b,16b)
423 " .long 3b,16b\n" 415 _ASM_EXTABLE(4b,16b)
424 " .long 31b,16b\n" 416 _ASM_EXTABLE(41b,16b)
425 " .long 4b,16b\n" 417 _ASM_EXTABLE(10b,16b)
426 " .long 41b,16b\n" 418 _ASM_EXTABLE(51b,16b)
427 " .long 10b,16b\n" 419 _ASM_EXTABLE(11b,16b)
428 " .long 51b,16b\n" 420 _ASM_EXTABLE(61b,16b)
429 " .long 11b,16b\n" 421 _ASM_EXTABLE(12b,16b)
430 " .long 61b,16b\n" 422 _ASM_EXTABLE(71b,16b)
431 " .long 12b,16b\n" 423 _ASM_EXTABLE(13b,16b)
432 " .long 71b,16b\n" 424 _ASM_EXTABLE(81b,16b)
433 " .long 13b,16b\n" 425 _ASM_EXTABLE(14b,16b)
434 " .long 81b,16b\n" 426 _ASM_EXTABLE(91b,16b)
435 " .long 14b,16b\n" 427 _ASM_EXTABLE(6b,9b)
436 " .long 91b,16b\n" 428 _ASM_EXTABLE(7b,16b)
437 " .long 6b,9b\n"
438 " .long 7b,16b\n"
439 ".previous"
440 : "=&c"(size), "=&D" (d0), "=&S" (d1) 429 : "=&c"(size), "=&D" (d0), "=&S" (d1)
441 : "1"(to), "2"(from), "0"(size) 430 : "1"(to), "2"(from), "0"(size)
442 : "eax", "edx", "memory"); 431 : "eax", "edx", "memory");
@@ -505,29 +494,26 @@ static unsigned long __copy_user_intel_nocache(void *to,
505 "9: lea 0(%%eax,%0,4),%0\n" 494 "9: lea 0(%%eax,%0,4),%0\n"
506 "16: jmp 8b\n" 495 "16: jmp 8b\n"
507 ".previous\n" 496 ".previous\n"
508 ".section __ex_table,\"a\"\n" 497 _ASM_EXTABLE(0b,16b)
509 " .align 4\n" 498 _ASM_EXTABLE(1b,16b)
510 " .long 0b,16b\n" 499 _ASM_EXTABLE(2b,16b)
511 " .long 1b,16b\n" 500 _ASM_EXTABLE(21b,16b)
512 " .long 2b,16b\n" 501 _ASM_EXTABLE(3b,16b)
513 " .long 21b,16b\n" 502 _ASM_EXTABLE(31b,16b)
514 " .long 3b,16b\n" 503 _ASM_EXTABLE(4b,16b)
515 " .long 31b,16b\n" 504 _ASM_EXTABLE(41b,16b)
516 " .long 4b,16b\n" 505 _ASM_EXTABLE(10b,16b)
517 " .long 41b,16b\n" 506 _ASM_EXTABLE(51b,16b)
518 " .long 10b,16b\n" 507 _ASM_EXTABLE(11b,16b)
519 " .long 51b,16b\n" 508 _ASM_EXTABLE(61b,16b)
520 " .long 11b,16b\n" 509 _ASM_EXTABLE(12b,16b)
521 " .long 61b,16b\n" 510 _ASM_EXTABLE(71b,16b)
522 " .long 12b,16b\n" 511 _ASM_EXTABLE(13b,16b)
523 " .long 71b,16b\n" 512 _ASM_EXTABLE(81b,16b)
524 " .long 13b,16b\n" 513 _ASM_EXTABLE(14b,16b)
525 " .long 81b,16b\n" 514 _ASM_EXTABLE(91b,16b)
526 " .long 14b,16b\n" 515 _ASM_EXTABLE(6b,9b)
527 " .long 91b,16b\n" 516 _ASM_EXTABLE(7b,16b)
528 " .long 6b,9b\n"
529 " .long 7b,16b\n"
530 ".previous"
531 : "=&c"(size), "=&D" (d0), "=&S" (d1) 517 : "=&c"(size), "=&D" (d0), "=&S" (d1)
532 : "1"(to), "2"(from), "0"(size) 518 : "1"(to), "2"(from), "0"(size)
533 : "eax", "edx", "memory"); 519 : "eax", "edx", "memory");
@@ -574,12 +560,9 @@ do { \
574 "3: lea 0(%3,%0,4),%0\n" \ 560 "3: lea 0(%3,%0,4),%0\n" \
575 " jmp 2b\n" \ 561 " jmp 2b\n" \
576 ".previous\n" \ 562 ".previous\n" \
577 ".section __ex_table,\"a\"\n" \ 563 _ASM_EXTABLE(4b,5b) \
578 " .align 4\n" \ 564 _ASM_EXTABLE(0b,3b) \
579 " .long 4b,5b\n" \ 565 _ASM_EXTABLE(1b,2b) \
580 " .long 0b,3b\n" \
581 " .long 1b,2b\n" \
582 ".previous" \
583 : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ 566 : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
584 : "3"(size), "0"(size), "1"(to), "2"(from) \ 567 : "3"(size), "0"(size), "1"(to), "2"(from) \
585 : "memory"); \ 568 : "memory"); \
@@ -616,12 +599,9 @@ do { \
616 " popl %0\n" \ 599 " popl %0\n" \
617 " jmp 2b\n" \ 600 " jmp 2b\n" \
618 ".previous\n" \ 601 ".previous\n" \
619 ".section __ex_table,\"a\"\n" \ 602 _ASM_EXTABLE(4b,5b) \
620 " .align 4\n" \ 603 _ASM_EXTABLE(0b,3b) \
621 " .long 4b,5b\n" \ 604 _ASM_EXTABLE(1b,6b) \
622 " .long 0b,3b\n" \
623 " .long 1b,6b\n" \
624 ".previous" \
625 : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ 605 : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
626 : "3"(size), "0"(size), "1"(to), "2"(from) \ 606 : "3"(size), "0"(size), "1"(to), "2"(from) \
627 : "memory"); \ 607 : "memory"); \
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 1fb85dbe390a..903ec1e9c326 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -1,11 +1,23 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/sort.h>
 #include <asm/uaccess.h>
 
+static inline unsigned long
+ex_insn_addr(const struct exception_table_entry *x)
+{
+	return (unsigned long)&x->insn + x->insn;
+}
+static inline unsigned long
+ex_fixup_addr(const struct exception_table_entry *x)
+{
+	return (unsigned long)&x->fixup + x->fixup;
+}
 
 int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
+	unsigned long new_ip;
 
 #ifdef CONFIG_PNPBIOS
 	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
@@ -23,15 +35,135 @@ int fixup_exception(struct pt_regs *regs)
 
 	fixup = search_exception_tables(regs->ip);
 	if (fixup) {
-		/* If fixup is less than 16, it means uaccess error */
-		if (fixup->fixup < 16) {
+		new_ip = ex_fixup_addr(fixup);
+
+		if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
+			/* Special hack for uaccess_err */
 			current_thread_info()->uaccess_err = 1;
-			regs->ip += fixup->fixup;
-			return 1;
+			new_ip -= 0x7ffffff0;
 		}
-		regs->ip = fixup->fixup;
+		regs->ip = new_ip;
 		return 1;
 	}
 
 	return 0;
 }
+
+/* Restricted version used during very early boot */
+int __init early_fixup_exception(unsigned long *ip)
+{
+	const struct exception_table_entry *fixup;
+	unsigned long new_ip;
+
+	fixup = search_exception_tables(*ip);
+	if (fixup) {
+		new_ip = ex_fixup_addr(fixup);
+
+		if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
+			/* uaccess handling not supported during early boot */
+			return 0;
+		}
+
+		*ip = new_ip;
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Search one exception table for an entry corresponding to the
+ * given instruction address, and return the address of the entry,
+ * or NULL if none is found.
+ * We use a binary search, and thus we assume that the table is
+ * already sorted.
+ */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+	       const struct exception_table_entry *last,
+	       unsigned long value)
+{
+	while (first <= last) {
+		const struct exception_table_entry *mid;
+		unsigned long addr;
+
+		mid = ((last - first) >> 1) + first;
+		addr = ex_insn_addr(mid);
+		if (addr < value)
+			first = mid + 1;
+		else if (addr > value)
+			last = mid - 1;
+		else
+			return mid;
+	}
+	return NULL;
+}
+
+/*
+ * The exception table needs to be sorted so that the binary
+ * search that we use to find entries in it works properly.
+ * This is used both for the kernel exception table and for
+ * the exception tables of modules that get loaded.
+ *
+ */
+static int cmp_ex(const void *a, const void *b)
+{
+	const struct exception_table_entry *x = a, *y = b;
+
+	/*
+	 * This value will always end up fittin in an int, because on
+	 * both i386 and x86-64 the kernel symbol-reachable address
+	 * space is < 2 GiB.
+	 *
+	 * This compare is only valid after normalization.
+	 */
+	return x->insn - y->insn;
+}
+
+void sort_extable(struct exception_table_entry *start,
+		  struct exception_table_entry *finish)
+{
+	struct exception_table_entry *p;
+	int i;
+
+	/* Convert all entries to being relative to the start of the section */
+	i = 0;
+	for (p = start; p < finish; p++) {
+		p->insn += i;
+		i += 4;
+		p->fixup += i;
+		i += 4;
+	}
+
+	sort(start, finish - start, sizeof(struct exception_table_entry),
+	     cmp_ex, NULL);
+
+	/* Denormalize all entries */
+	i = 0;
+	for (p = start; p < finish; p++) {
+		p->insn -= i;
+		i += 4;
+		p->fixup -= i;
+		i += 4;
+	}
+}
+
+#ifdef CONFIG_MODULES
+/*
+ * If the exception table is sorted, any referring to the module init
+ * will be at the beginning or the end.
+ */
+void trim_init_extable(struct module *m)
+{
+	/*trim the beginning*/
+	while (m->num_exentries &&
+	       within_module_init(ex_insn_addr(&m->extable[0]), m)) {
+		m->extable++;
+		m->num_exentries--;
+	}
+	/*trim the end*/
+	while (m->num_exentries &&
+	       within_module_init(ex_insn_addr(&m->extable[m->num_exentries-1]), m))
+		m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
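The normalize/sort/denormalize dance in sort_extable() deserves a worked example. A self-relative field changes meaning whenever its entry moves, so sorting raw entries would silently corrupt the addresses they encode; rebasing each field to its section offset first makes the stored values independent of the entry's slot. The same idea in standalone C (illustrative, using qsort in place of the kernel's sort()):

    #include <stdint.h>
    #include <stdlib.h>

    struct exception_table_entry { int32_t insn, fixup; };

    static int cmp_ex(const void *a, const void *b)
    {
    	const struct exception_table_entry *x = a, *y = b;
    	return x->insn - y->insn;	/* valid only while normalized */
    }

    static void demo_sort_extable(struct exception_table_entry *start, size_t n)
    {
    	int32_t i = 0;

    	for (size_t k = 0; k < n; k++) {	/* normalize */
    		start[k].insn  += i; i += 4;
    		start[k].fixup += i; i += 4;
    	}
    	qsort(start, n, sizeof(*start), cmp_ex);
    	i = 0;
    	for (size_t k = 0; k < n; k++) {	/* denormalize */
    		start[k].insn  -= i; i += 4;
    		start[k].fixup -= i; i += 4;
    	}
    }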
diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S
index f058d2f82e18..8d0c420465cc 100644
--- a/arch/x86/um/checksum_32.S
+++ b/arch/x86/um/checksum_32.S
@@ -26,6 +26,7 @@
  */
 
 #include <asm/errno.h>
+#include <asm/asm.h>
 
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -232,15 +233,11 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 
 #define SRC(y...)			\
 	9999: y;			\
-	.section __ex_table, "a";	\
-	.long 9999b, 6001f	;	\
-	.previous
+	_ASM_EXTABLE(9999b, 6001f)
 
 #define DST(y...)			\
 	9999: y;			\
-	.section __ex_table, "a";	\
-	.long 9999b, 6002f	;	\
-	.previous
+	_ASM_EXTABLE(9999b, 6002f)
 
 .align 4
 
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index b040b0e518ca..f9643fc50de5 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -14,6 +14,7 @@
 #include <asm/thread_info.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>
+#include <asm/asm.h>
 
 #include <xen/interface/xen.h>
 
@@ -137,10 +138,7 @@ iret_restore_end:
 
 1:	iret
 xen_iret_end_crit:
-.section __ex_table, "a"
-	.align 4
-	.long 1b, iret_exc
-.previous
+	_ASM_EXTABLE(1b, iret_exc)
 
 hyper_iret:
 	/* put this out of line since its very rarely used */