Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 21
-rw-r--r--  arch/x86/Makefile | 16
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 24
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 40
-rw-r--r--  arch/x86/include/asm/Kbuild | 2
-rw-r--r--  arch/x86/include/asm/compat.h | 40
-rw-r--r--  arch/x86/include/asm/elf.h | 31
-rw-r--r--  arch/x86/include/asm/i387.h | 307
-rw-r--r--  arch/x86/include/asm/ia32.h | 18
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h | 16
-rw-r--r--  arch/x86/include/asm/mtrr.h | 28
-rw-r--r--  arch/x86/include/asm/perf_event.h | 8
-rw-r--r--  arch/x86/include/asm/posix_types.h | 4
-rw-r--r--  arch/x86/include/asm/posix_types_32.h | 75
-rw-r--r--  arch/x86/include/asm/posix_types_64.h | 106
-rw-r--r--  arch/x86/include/asm/posix_types_x32.h | 19
-rw-r--r--  arch/x86/include/asm/processor.h | 14
-rw-r--r--  arch/x86/include/asm/ptrace.h | 1
-rw-r--r--  arch/x86/include/asm/sigcontext.h | 57
-rw-r--r--  arch/x86/include/asm/sigframe.h | 13
-rw-r--r--  arch/x86/include/asm/sighandling.h | 24
-rw-r--r--  arch/x86/include/asm/sys_ia32.h | 7
-rw-r--r--  arch/x86/include/asm/syscall.h | 5
-rw-r--r--  arch/x86/include/asm/thread_info.h | 20
-rw-r--r--  arch/x86/include/asm/traps.h | 25
-rw-r--r--  arch/x86/include/asm/unistd.h | 15
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 6
-rw-r--r--  arch/x86/kernel/cpu/common.c | 5
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 44
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 7
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 8
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 37
-rw-r--r--  arch/x86/kernel/dumpstack.c | 2
-rw-r--r--  arch/x86/kernel/entry_64.S | 53
-rw-r--r--  arch/x86/kernel/irqinit.c | 2
-rw-r--r--  arch/x86/kernel/microcode_amd.c | 1
-rw-r--r--  arch/x86/kernel/process_32.c | 26
-rw-r--r--  arch/x86/kernel/process_64.c | 57
-rw-r--r--  arch/x86/kernel/ptrace.c | 102
-rw-r--r--  arch/x86/kernel/signal.c | 140
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 6
-rw-r--r--  arch/x86/kernel/syscall_64.c | 8
-rw-r--r--  arch/x86/kernel/traps.c | 176
-rw-r--r--  arch/x86/kernel/vm86_32.c | 2
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 2
-rw-r--r--  arch/x86/kernel/xsave.c | 12
-rw-r--r--  arch/x86/kvm/emulate.c | 51
-rw-r--r--  arch/x86/kvm/svm.c | 5
-rw-r--r--  arch/x86/kvm/vmx.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 45
-rw-r--r--  arch/x86/math-emu/fpu_entry.c | 5
-rw-r--r--  arch/x86/mm/fault.c | 10
-rw-r--r--  arch/x86/oprofile/backtrace.c | 2
-rw-r--r--  arch/x86/pci/xen.c | 2
-rw-r--r--  arch/x86/syscalls/Makefile | 22
-rw-r--r--  arch/x86/syscalls/syscall_32.tbl | 2
-rw-r--r--  arch/x86/syscalls/syscall_64.tbl | 579
-rw-r--r--  arch/x86/um/sys_call_table_64.c | 3
-rw-r--r--  arch/x86/um/user-offsets.c | 2
-rw-r--r--  arch/x86/vdso/.gitignore | 2
-rw-r--r--  arch/x86/vdso/Makefile | 46
-rw-r--r--  arch/x86/vdso/vdso32-setup.c | 5
-rw-r--r--  arch/x86/vdso/vdsox32.S | 22
-rw-r--r--  arch/x86/vdso/vdsox32.lds.S | 28
-rw-r--r--  arch/x86/vdso/vma.c | 78
-rw-r--r--  arch/x86/xen/enlighten.c | 6
-rw-r--r--  arch/x86/xen/mmu.c | 8
-rw-r--r--  arch/x86/xen/smp.c | 7
70 files changed, 1733 insertions, 843 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 481dbfcf14e..d2a540f7d6c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2168,9 +2168,9 @@ config IA32_EMULATION
 	depends on X86_64
 	select COMPAT_BINFMT_ELF
 	---help---
-	  Include code to run 32-bit programs under a 64-bit kernel. You should
-	  likely turn this on, unless you're 100% sure that you don't have any
-	  32-bit programs left.
+	  Include code to run legacy 32-bit programs under a
+	  64-bit kernel. You should likely turn this on, unless you're
+	  100% sure that you don't have any 32-bit programs left.
 
 config IA32_AOUT
 	tristate "IA32 a.out support"
@@ -2178,9 +2178,22 @@ config IA32_AOUT
 	---help---
 	  Support old a.out binaries in the 32bit emulation.
 
+config X86_X32
+	bool "x32 ABI for 64-bit mode (EXPERIMENTAL)"
+	depends on X86_64 && IA32_EMULATION && EXPERIMENTAL
+	---help---
+	  Include code to run binaries for the x32 native 32-bit ABI
+	  for 64-bit processors.  An x32 process gets access to the
+	  full 64-bit register file and wide data path while leaving
+	  pointers at 32 bits for smaller memory footprint.
+
+	  You will need a recent binutils (2.22 or later) with
+	  elf32_x86_64 support enabled to compile a kernel with this
+	  option set.
+
 config COMPAT
 	def_bool y
-	depends on IA32_EMULATION
+	depends on IA32_EMULATION || X86_X32
 
 config COMPAT_FOR_U64_ALIGNMENT
 	def_bool COMPAT
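
[Editor's aside: the X86_X32 help text above is the whole idea of x32 in one paragraph. A tiny userspace check makes it concrete; this sketch is illustrative only, assumes a gcc/binutils pair with x32 support (gcc -mx32), and is not part of the patch.]

	/* sizes.c - build with: gcc -mx32 sizes.c (assumes an x32-capable toolchain) */
	#include <stdio.h>

	int main(void)
	{
		/* x32 is ILP32 on top of the 64-bit syscall interface */
		printf("sizeof(void *)    = %zu\n", sizeof(void *));    /* 4 on x32 */
		printf("sizeof(long)      = %zu\n", sizeof(long));      /* 4 on x32 */
		printf("sizeof(long long) = %zu\n", sizeof(long long)); /* 8: full 64-bit arithmetic */
		return 0;
	}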
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 209ba129459..968dbe24a25 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -82,6 +82,22 @@ ifdef CONFIG_CC_STACKPROTECTOR
 	endif
 endif
 
+ifdef CONFIG_X86_X32
+	x32_ld_ok := $(call try-run,\
+			/bin/echo -e '1: .quad 1b' | \
+			$(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" - && \
+			$(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \
+			$(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n)
+	ifeq ($(x32_ld_ok),y)
+		CONFIG_X86_X32_ABI := y
+		KBUILD_AFLAGS += -DCONFIG_X86_X32_ABI
+		KBUILD_CFLAGS += -DCONFIG_X86_X32_ABI
+	else
+		$(warning CONFIG_X86_X32 enabled but no binutils support)
+	endif
+endif
+export CONFIG_X86_X32_ABI
+
 # Don't unroll struct assignments with kmemcheck enabled
 ifeq ($(CONFIG_KMEMCHECK),y)
 	KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 65577698cab..45b4fdd4e1d 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -12,10 +12,8 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/kernel.h>
-#include <linux/signal.h>
 #include <linux/errno.h>
 #include <linux/wait.h>
-#include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/personality.h>
@@ -31,20 +29,15 @@
 #include <asm/proto.h>
 #include <asm/vdso.h>
 #include <asm/sigframe.h>
+#include <asm/sighandling.h>
 #include <asm/sys_ia32.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-#define FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
-			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
-			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
-			 X86_EFLAGS_CF)
-
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+#define FIX_EFLAGS	__FIX_EFLAGS
 
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
 	int err = 0;
+	bool ia32 = is_ia32_task();
 
 	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
 		return -EFAULT;
@@ -74,8 +67,13 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 	case __SI_FAULT >> 16:
 		break;
 	case __SI_CHLD >> 16:
-		put_user_ex(from->si_utime, &to->si_utime);
-		put_user_ex(from->si_stime, &to->si_stime);
+		if (ia32) {
+			put_user_ex(from->si_utime, &to->si_utime);
+			put_user_ex(from->si_stime, &to->si_stime);
+		} else {
+			put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime);
+			put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime);
+		}
 		put_user_ex(from->si_status, &to->si_status);
 		/* FALL THROUGH */
 	default:
@@ -347,7 +345,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 		put_user_ex(regs->dx, &sc->dx);
 		put_user_ex(regs->cx, &sc->cx);
 		put_user_ex(regs->ax, &sc->ax);
-		put_user_ex(current->thread.trap_no, &sc->trapno);
+		put_user_ex(current->thread.trap_nr, &sc->trapno);
 		put_user_ex(current->thread.error_code, &sc->err);
 		put_user_ex(regs->ip, &sc->ip);
 		put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index f6f5c53dc90..aec2202a596 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -287,46 +287,6 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
 	return ret;
 }
 
-asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
-				     compat_sigset_t __user *oset,
-				     unsigned int sigsetsize)
-{
-	sigset_t s;
-	compat_sigset_t s32;
-	int ret;
-	mm_segment_t old_fs = get_fs();
-
-	if (set) {
-		if (copy_from_user(&s32, set, sizeof(compat_sigset_t)))
-			return -EFAULT;
-		switch (_NSIG_WORDS) {
-		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
-		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
-		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
-		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
-		}
-	}
-	set_fs(KERNEL_DS);
-	ret = sys_rt_sigprocmask(how,
-				 set ? (sigset_t __user *)&s : NULL,
-				 oset ? (sigset_t __user *)&s : NULL,
-				 sigsetsize);
-	set_fs(old_fs);
-	if (ret)
-		return ret;
-	if (oset) {
-		switch (_NSIG_WORDS) {
-		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
-		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
-		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
-		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
-		}
-		if (copy_to_user(oset, &s32, sizeof(compat_sigset_t)))
-			return -EFAULT;
-	}
-	return 0;
-}
-
 asmlinkage long sys32_alarm(unsigned int seconds)
 {
 	return alarm_setitimer(seconds);
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index b57e6a43a37..f9c0d3ba9e8 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -14,6 +14,7 @@ header-y += msr.h
 header-y += mtrr.h
 header-y += posix_types_32.h
 header-y += posix_types_64.h
+header-y += posix_types_x32.h
 header-y += prctl.h
 header-y += processor-flags.h
 header-y += ptrace-abi.h
@@ -24,3 +25,4 @@ header-y += vsyscall.h
 
 genhdr-y += unistd_32.h
 genhdr-y += unistd_64.h
+genhdr-y += unistd_x32.h
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 30d737ef2a4..d6805798d6f 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,7 +6,9 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <asm/processor.h>
 #include <asm/user32.h>
+#include <asm/unistd.h>
 
 #define COMPAT_USER_HZ		100
 #define COMPAT_UTS_MACHINE	"i686\0\0"
@@ -186,7 +188,20 @@ struct compat_shmid64_ds {
 /*
  * The type of struct elf_prstatus.pr_reg in compatible core dumps.
  */
+#ifdef CONFIG_X86_X32_ABI
+typedef struct user_regs_struct compat_elf_gregset_t;
+
+#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
+#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
+#define SET_PR_FPVALID(S,V) \
+  do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
+  while (0)
+
+#define COMPAT_USE_64BIT_TIME \
+	(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
+#else
 typedef struct user_regs_struct32 compat_elf_gregset_t;
+#endif
 
 /*
  * A pointer passed in from user mode.  This should not
@@ -208,13 +223,30 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
-	struct pt_regs *regs = task_pt_regs(current);
-	return (void __user *)regs->sp - len;
+	compat_uptr_t sp;
+
+	if (test_thread_flag(TIF_IA32)) {
+		sp = task_pt_regs(current)->sp;
+	} else {
+		/* -128 for the x32 ABI redzone */
+		sp = percpu_read(old_rsp) - 128;
+	}
+
+	return (void __user *)round_down(sp - len, 16);
+}
+
+static inline bool is_x32_task(void)
+{
+#ifdef CONFIG_X86_X32_ABI
+	if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
+		return true;
+#endif
+	return false;
 }
 
-static inline int is_compat_task(void)
+static inline bool is_compat_task(void)
 {
-	return current_thread_info()->status & TS_COMPAT;
+	return is_ia32_task() || is_x32_task();
 }
 
 #endif /* _ASM_X86_COMPAT_H */
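
[Editor's note: the is_x32_task() test above keys off a marker bit OR'd into the syscall number. A hedged userspace restatement of that convention follows; the 0x40000000 value mirrors the x86-64 unistd headers and should be treated as this sketch's assumption.]

	#include <stdio.h>

	#define __X32_SYSCALL_BIT 0x40000000UL

	/* mirrors the orig_ax test in is_x32_task() */
	static int nr_is_x32(unsigned long orig_ax)
	{
		return (orig_ax & __X32_SYSCALL_BIT) != 0;
	}

	int main(void)
	{
		printf("%d\n", nr_is_x32(1));                     /* plain 64-bit write(): 0 */
		printf("%d\n", nr_is_x32(__X32_SYSCALL_BIT | 1)); /* x32 write(): 1 */
		return 0;
	}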
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5f962df30d0..1e40634591a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -156,7 +156,12 @@ do { \
 #define elf_check_arch(x)			\
 	((x)->e_machine == EM_X86_64)
 
-#define compat_elf_check_arch(x)	elf_check_arch_ia32(x)
+#define compat_elf_check_arch(x)					\
+	(elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
+
+#if __USER32_DS != __USER_DS
+# error "The following code assumes __USER32_DS == __USER_DS"
+#endif
 
 static inline void elf_common_init(struct thread_struct *t,
 				   struct pt_regs *regs, const u16 ds)
@@ -179,8 +184,9 @@ static inline void elf_common_init(struct thread_struct *t,
 void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
 #define compat_start_thread start_thread_ia32
 
-void set_personality_ia32(void);
-#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
+void set_personality_ia32(bool);
+#define COMPAT_SET_PERSONALITY(ex)			\
+	set_personality_ia32((ex).e_machine == EM_X86_64)
 
 #define COMPAT_ELF_PLATFORM	("i686")
 
@@ -287,7 +293,7 @@ do { \
 #define VDSO_HIGH_BASE		0xffffe000U /* CONFIG_COMPAT_VDSO address */
 
 /* 1GB for 64bit, 8MB for 32bit */
-#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
+#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
 
 #define ARCH_DLINFO							\
 do {									\
@@ -296,9 +302,20 @@ do { \
 		    (unsigned long)current->mm->context.vdso);		\
 } while (0)
 
+#define ARCH_DLINFO_X32						\
+do {								\
+	if (vdso_enabled)					\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR,			\
+			    (unsigned long)current->mm->context.vdso);	\
+} while (0)
+
 #define AT_SYSINFO		32
 
-#define COMPAT_ARCH_DLINFO	ARCH_DLINFO_IA32(sysctl_vsyscall32)
+#define COMPAT_ARCH_DLINFO						\
+if (test_thread_flag(TIF_X32))						\
+	ARCH_DLINFO_X32;						\
+else									\
+	ARCH_DLINFO_IA32(sysctl_vsyscall32)
 
 #define COMPAT_ELF_ET_DYN_BASE	(TASK_UNMAPPED_BASE + 0x1000000)
 
@@ -314,6 +331,8 @@ struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);
+extern int x32_setup_additional_pages(struct linux_binprm *bprm,
+				      int uses_interp);
 
 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
 #define compat_arch_setup_additional_pages	syscall32_setup_pages
@@ -330,7 +349,7 @@ static inline int mmap_is_ia32(void)
 	return 1;
 #endif
 #ifdef CONFIG_IA32_EMULATION
-	if (test_thread_flag(TIF_IA32))
+	if (test_thread_flag(TIF_ADDR32))
 		return 1;
 #endif
 	return 0;
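
[Editor's note: the relaxed compat_elf_check_arch() above is what lets an ELFCLASS32 binary with e_machine == EM_X86_64 (i.e. an x32 binary) into the compat loader. A standalone sketch of that classification, relying on e_ident and e_machine sitting at the same offsets in Elf32 and Elf64 headers; illustrative, with minimal error handling.]

	#include <elf.h>
	#include <stdio.h>

	static const char *classify(const unsigned char *ident, unsigned int machine)
	{
		if (ident[EI_CLASS] == ELFCLASS64 && machine == EM_X86_64)
			return "x86-64";
		if (ident[EI_CLASS] == ELFCLASS32 && machine == EM_X86_64)
			return "x32";	/* 32-bit class, 64-bit machine type */
		if (ident[EI_CLASS] == ELFCLASS32 && machine == EM_386)
			return "i386";
		return "other";
	}

	int main(int argc, char **argv)
	{
		Elf32_Ehdr hdr;	/* e_ident..e_machine overlap for both classes */
		FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

		if (!f || fread(&hdr, sizeof(hdr), 1, f) != 1)
			return 1;
		puts(classify(hdr.e_ident, hdr.e_machine));
		fclose(f);
		return 0;
	}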
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 6919e936345..247904945d3 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,10 +29,11 @@ extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
-extern asmlinkage void math_state_restore(void);
-extern void __math_state_restore(void);
+extern void math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
+DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
+
 extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
 				xstateregs_get;
@@ -212,19 +213,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
 
 #endif	/* CONFIG_X86_64 */
 
-/* We need a safe address that is cheap to find and that is already
-   in L1 during context switch. The best choices are unfortunately
-   different for UP and SMP */
-#ifdef CONFIG_SMP
-#define safe_address (__per_cpu_offset[0])
-#else
-#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
-#endif
-
 /*
- * These must be called with preempt disabled
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
  */
-static inline void fpu_save_init(struct fpu *fpu)
+static inline int fpu_save_init(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		fpu_xsave(fpu);
@@ -233,33 +226,33 @@ static inline void fpu_save_init(struct fpu *fpu)
 		 * xsave header may indicate the init state of the FP.
 		 */
 		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
-			return;
+			return 1;
 	} else if (use_fxsr()) {
 		fpu_fxsave(fpu);
 	} else {
 		asm volatile("fnsave %[fx]; fwait"
 			     : [fx] "=m" (fpu->state->fsave));
-		return;
+		return 0;
 	}
 
-	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+	/*
+	 * If exceptions are pending, we need to clear them so
+	 * that we don't randomly get exceptions later.
+	 *
+	 * FIXME! Is this perhaps only true for the old-style
+	 * irq13 case? Maybe we could leave the x87 state
+	 * intact otherwise?
+	 */
+	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
 		asm volatile("fnclex");
-
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending.  Clear the x87 state here by setting it to fixed
-	   values. safe_address is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (safe_address));
+		return 0;
+	}
+	return 1;
 }
 
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline int __save_init_fpu(struct task_struct *tsk)
 {
-	fpu_save_init(&tsk->thread.fpu);
-	task_thread_info(tsk)->status &= ~TS_USEDFPU;
+	return fpu_save_init(&tsk->thread.fpu);
 }
 
 static inline int fpu_fxrstor_checking(struct fpu *fpu)
@@ -277,44 +270,212 @@ static inline int fpu_restore_checking(struct fpu *fpu)
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
 {
+	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+	   is pending.  Clear the x87 state here by setting it to fixed
+	   values. "m" is a random variable that should be in L1 */
+	alternative_input(
+		ASM_NOP8 ASM_NOP2,
+		"emms\n\t"		/* clear stack tags */
+		"fildl %P[addr]",	/* set F?P to defined value */
+		X86_FEATURE_FXSAVE_LEAK,
+		[addr] "m" (tsk->thread.fpu.has_fpu));
+
 	return fpu_restore_checking(&tsk->thread.fpu);
 }
 
 /*
- * Signal frame handlers...
+ * Software FPU state helpers. Careful: these need to
+ * be preemption protection *and* they need to be
+ * properly paired with the CR0.TS changes!
  */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
+static inline int __thread_has_fpu(struct task_struct *tsk)
+{
+	return tsk->thread.fpu.has_fpu;
+}
 
-static inline void __unlazy_fpu(struct task_struct *tsk)
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		__save_init_fpu(tsk);
-		stts();
-	} else
-		tsk->fpu_counter = 0;
+	tsk->thread.fpu.has_fpu = 0;
+	percpu_write(fpu_owner_task, NULL);
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
+{
+	tsk->thread.fpu.has_fpu = 1;
+	percpu_write(fpu_owner_task, tsk);
+}
+
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct task_struct *tsk)
+{
+	__thread_clear_has_fpu(tsk);
+	stts();
+}
+
+static inline void __thread_fpu_begin(struct task_struct *tsk)
+{
+	clts();
+	__thread_set_has_fpu(tsk);
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ *  - switch_fpu_prepare() saves the old state and
+ *    sets the new state of the CR0.TS bit. This is
+ *    done within the context of the old process.
+ *
+ *  - switch_fpu_finish() restores the new state as
+ *    necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+/*
+ * FIXME! We could do a totally lazy restore, but we need to
+ * add a per-cpu "this was the task that last touched the FPU
+ * on this CPU" variable, and the task needs to have a "I last
+ * touched the FPU on this CPU" and check them.
+ *
+ * We don't do that yet, so "fpu_lazy_restore()" always returns
+ * false, but some day..
+ */
+static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+{
+	return new == percpu_read_stable(fpu_owner_task) &&
+		cpu == new->thread.fpu.last_cpu;
+}
+
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+{
+	fpu_switch_t fpu;
+
+	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	if (__thread_has_fpu(old)) {
+		if (!__save_init_fpu(old))
+			cpu = ~0;
+		old->thread.fpu.last_cpu = cpu;
+		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */
+
+		/* Don't change CR0.TS if we just switch! */
+		if (fpu.preload) {
+			new->fpu_counter++;
+			__thread_set_has_fpu(new);
+			prefetch(new->thread.fpu.state);
+		} else
+			stts();
+	} else {
+		old->fpu_counter = 0;
+		old->thread.fpu.last_cpu = ~0;
+		if (fpu.preload) {
+			new->fpu_counter++;
+			if (fpu_lazy_restore(new, cpu))
+				fpu.preload = 0;
+			else
+				prefetch(new->thread.fpu.state);
+			__thread_fpu_begin(new);
+		}
+	}
+	return fpu;
+}
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+{
+	if (fpu.preload) {
+		if (unlikely(restore_fpu_checking(new)))
+			__thread_fpu_end(new);
+	}
 }
 
+/*
+ * Signal frame handlers...
+ */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);
+
 static inline void __clear_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+	if (__thread_has_fpu(tsk)) {
 		/* Ignore delayed exceptions from user space */
 		asm volatile("1: fwait\n"
 			     "2:\n"
 			     _ASM_EXTABLE(1b, 2b));
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
+		__thread_fpu_end(tsk);
 	}
 }
 
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: the thread must not have fpu (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+	return !__thread_has_fpu(current) &&
+		(read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+	struct pt_regs *regs = get_irq_regs();
+	return regs && user_mode_vm(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+static inline bool irq_fpu_usable(void)
+{
+	return !in_interrupt() ||
+		interrupted_user_mode() ||
+		interrupted_kernel_fpu_idle();
+}
+
 static inline void kernel_fpu_begin(void)
 {
-	struct thread_info *me = current_thread_info();
+	struct task_struct *me = current;
+
+	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
-	if (me->status & TS_USEDFPU)
-		__save_init_fpu(me->task);
-	else
+	if (__thread_has_fpu(me)) {
+		__save_init_fpu(me);
+		__thread_clear_has_fpu(me);
+		/* We do 'stts()' in kernel_fpu_end() */
+	} else {
+		percpu_write(fpu_owner_task, NULL);
 		clts();
+	}
 }
 
 static inline void kernel_fpu_end(void)
@@ -323,14 +484,6 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
-static inline bool irq_fpu_usable(void)
-{
-	struct pt_regs *regs;
-
-	return !in_interrupt() || !(regs = get_irq_regs()) || \
-		user_mode(regs) || (read_cr0() & X86_CR0_TS);
-}
-
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions
@@ -363,20 +516,64 @@ static inline void irq_ts_restore(int TS_state)
 }
 
 /*
+ * The question "does this thread have fpu access?"
+ * is slightly racy, since preemption could come in
+ * and revoke it immediately after the test.
+ *
+ * However, even in that very unlikely scenario,
+ * we can just assume we have FPU access - typically
+ * to save the FP state - we'll just take a #NM
+ * fault and get the FPU access back.
+ *
+ * The actual user_fpu_begin/end() functions
+ * need to be preemption-safe, though.
+ *
+ * NOTE! user_fpu_end() must be used only after you
+ * have saved the FP state, and user_fpu_begin() must
+ * be used only immediately before restoring it.
+ * These functions do not do any save/restore on
+ * their own.
+ */
+static inline int user_has_fpu(void)
+{
+	return __thread_has_fpu(current);
+}
+
+static inline void user_fpu_end(void)
+{
+	preempt_disable();
+	__thread_fpu_end(current);
+	preempt_enable();
+}
+
+static inline void user_fpu_begin(void)
+{
+	preempt_disable();
+	if (!user_has_fpu())
+		__thread_fpu_begin(current);
+	preempt_enable();
+}
+
+/*
  * These disable preemption on their own and are safe
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
+	WARN_ON_ONCE(!__thread_has_fpu(tsk));
 	preempt_disable();
 	__save_init_fpu(tsk);
-	stts();
+	__thread_fpu_end(tsk);
 	preempt_enable();
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk)
 {
 	preempt_disable();
-	__unlazy_fpu(tsk);
+	if (__thread_has_fpu(tsk)) {
+		__save_init_fpu(tsk);
+		__thread_fpu_end(tsk);
+	} else
+		tsk->fpu_counter = 0;
 	preempt_enable();
 }
 
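[Editor's note: the "FPU state switching for scheduling" comment above describes a two-stage protocol; the sketch below shows how a scheduler is expected to consume it. This is condensed, kernel-context illustration built only from the helpers in this header — not the kernel's actual __switch_to() (see process_64.c in the diffstat for the real consumer) and not standalone-compilable.]

	/* illustrative kernel-context pseudocode */
	struct task_struct *my_switch_to(struct task_struct *prev,
					 struct task_struct *next, int cpu)
	{
		fpu_switch_t fpu;

		/* stage 1: save prev's state, decide whether to preload next's */
		fpu = switch_fpu_prepare(prev, next, cpu);

		/* ... switch stacks, segment bases, TLS descriptors, etc. ... */

		/* stage 2: conditionally restore next's register state */
		switch_fpu_finish(next, fpu);
		return prev;
	}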
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 1f7e6251728..7d0c1858770 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -43,6 +43,15 @@ struct ucontext_ia32 {
 	compat_sigset_t	  uc_sigmask;	/* mask last for extensibility */
 };
 
+struct ucontext_x32 {
+	unsigned int	  uc_flags;
+	unsigned int	  uc_link;
+	stack_ia32_t	  uc_stack;
+	unsigned int	  uc__pad0;	/* needed for alignment */
+	struct sigcontext uc_mcontext;	/* the 64-bit sigcontext type */
+	compat_sigset_t	  uc_sigmask;	/* mask last for extensibility */
+};
+
 /* This matches struct stat64 in glibc2.2, hence the absolutely
  * insane amounts of padding around dev_t's.
  */
@@ -116,6 +125,15 @@ typedef struct compat_siginfo {
 			compat_clock_t _stime;
 		} _sigchld;
 
+		/* SIGCHLD (x32 version) */
+		struct {
+			unsigned int _pid;	/* which child */
+			unsigned int _uid;	/* sender's uid */
+			int _status;		/* exit code */
+			s64 _utime;
+			s64 _stime;
+		} _sigchld_x32;
+
 		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
 		struct {
 			unsigned int _addr;	/* faulting insn/memory ref. */
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index ab4092e3214..7b9cfc4878a 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -190,6 +190,9 @@ struct x86_emulate_ops {
 	int (*intercept)(struct x86_emulate_ctxt *ctxt,
 			 struct x86_instruction_info *info,
 			 enum x86_intercept_stage stage);
+
+	bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
+			 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -298,6 +301,19 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_PROT     (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
 			       X86EMUL_MODE_PROT64)
 
+/* CPUID vendors */
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
+
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
+
 enum x86_intercept_stage {
 	X86_ICTP_NONE = 0,   /* Allow zero-init to not match anything */
 	X86_ICPT_PRE_EXCEPT,
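
[Editor's note: the X86EMUL_CPUID_VENDOR_* constants above are simply the CPUID leaf-0 vendor strings packed little-endian into ebx/edx/ecx. A quick standalone check:]

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* CPUID returns the vendor string in ebx, edx, ecx order */
		unsigned int regs[3] = {
			0x756e6547,	/* ebx: "Genu" */
			0x49656e69,	/* edx: "ineI" */
			0x6c65746e,	/* ecx: "ntel" */
		};
		char vendor[13];

		memcpy(vendor, regs, 12);
		vendor[12] = '\0';
		printf("%s\n", vendor);	/* prints "GenuineIntel" */
		return 0;
	}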
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 4365ffdb461..7e3f17f92c6 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -29,18 +29,18 @@
 
 #define MTRR_IOCTL_BASE	'M'
 
-struct mtrr_sentry {
-    unsigned long base;    /* Base address     */
-    unsigned int size;     /* Size of region   */
-    unsigned int type;     /* Type of region   */
-};
-
 /* Warning: this structure has a different order from i386
    on x86-64. The 32bit emulation code takes care of that.
    But you need to use this for 64bit, otherwise your X server
    will break. */
 
 #ifdef __i386__
+struct mtrr_sentry {
+    unsigned long base;    /* Base address     */
+    unsigned int size;     /* Size of region   */
+    unsigned int type;     /* Type of region   */
+};
+
 struct mtrr_gentry {
     unsigned int regnum;   /* Register number  */
     unsigned long base;    /* Base address     */
@@ -50,12 +50,20 @@ struct mtrr_gentry {
 
 #else /* __i386__ */
 
+struct mtrr_sentry {
+	__u64 base;		/* Base address     */
+	__u32 size;		/* Size of region   */
+	__u32 type;		/* Type of region   */
+};
+
 struct mtrr_gentry {
-	unsigned long base;    /* Base address     */
-	unsigned int size;     /* Size of region   */
-	unsigned int regnum;   /* Register number  */
-	unsigned int type;     /* Type of region   */
+	__u64 base;		/* Base address     */
+	__u32 size;		/* Size of region   */
+	__u32 regnum;		/* Register number  */
+	__u32 type;		/* Type of region   */
+	__u32 _pad;		/* Unused           */
 };
+
 #endif /* !__i386__ */
 
 struct mtrr_var_range {
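
[Editor's note: the point of the __u64/__u32 rewrite plus the explicit _pad above is that mtrr_gentry now has one fixed 24-byte layout for every 64-bit ABI (LP64 and x32 alike), which the MTRR ioctls depend on. A hypothetical mirror of the struct, only to show the arithmetic — not the uapi header itself:]

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	struct mtrr_gentry_fixed {
		uint64_t base;
		uint32_t size;
		uint32_t regnum;
		uint32_t type;
		uint32_t _pad;	/* keeps sizeof at 24 with no tail padding */
	};

	int main(void)
	{
		printf("sizeof = %zu\n", sizeof(struct mtrr_gentry_fixed));       /* 24 */
		printf("type @ %zu\n", offsetof(struct mtrr_gentry_fixed, type)); /* 16 */
		return 0;
	}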
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 9b922c13625..e8fb2c7a5f4 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -240,4 +240,12 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+ extern void amd_pmu_enable_virt(void);
+ extern void amd_pmu_disable_virt(void);
+#else
+ static inline void amd_pmu_enable_virt(void) { }
+ static inline void amd_pmu_disable_virt(void) { }
+#endif
+
 #endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index bb7133dc155..3427b7798db 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -7,7 +7,9 @@
 #else
 # ifdef __i386__
 #  include "posix_types_32.h"
-# else
+# elif defined(__LP64__)
 #  include "posix_types_64.h"
+# else
+#  include "posix_types_x32.h"
 # endif
 #endif
diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h
index f7d9adf82e5..99f262e04b9 100644
--- a/arch/x86/include/asm/posix_types_32.h
+++ b/arch/x86/include/asm/posix_types_32.h
@@ -7,79 +7,22 @@
  * assume GCC is being used.
  */
 
-typedef unsigned long	__kernel_ino_t;
 typedef unsigned short	__kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
 typedef unsigned short	__kernel_nlink_t;
-typedef long		__kernel_off_t;
-typedef int		__kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
 typedef unsigned short	__kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
 typedef unsigned short	__kernel_uid_t;
 typedef unsigned short	__kernel_gid_t;
-typedef unsigned int	__kernel_size_t;
-typedef int		__kernel_ssize_t;
-typedef int		__kernel_ptrdiff_t;
-typedef long		__kernel_time_t;
-typedef long		__kernel_suseconds_t;
-typedef long		__kernel_clock_t;
-typedef int		__kernel_timer_t;
-typedef int		__kernel_clockid_t;
-typedef int		__kernel_daddr_t;
-typedef char *		__kernel_caddr_t;
-typedef unsigned short	__kernel_uid16_t;
-typedef unsigned short	__kernel_gid16_t;
-typedef unsigned int	__kernel_uid32_t;
-typedef unsigned int	__kernel_gid32_t;
+#define __kernel_uid_t __kernel_uid_t
 
-typedef unsigned short	__kernel_old_uid_t;
-typedef unsigned short	__kernel_old_gid_t;
 typedef unsigned short	__kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
 
-#ifdef __GNUC__
-typedef long long	__kernel_loff_t;
-#endif
-
-typedef struct {
-	int	val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef	__FD_SET
-#define __FD_SET(fd,fdsetp) \
-	asm volatile("btsl %1,%0": \
-		"+m" (*(__kernel_fd_set *)(fdsetp)) \
-		: "r" ((int)(fd)))
-
-#undef	__FD_CLR
-#define __FD_CLR(fd,fdsetp) \
-	asm volatile("btrl %1,%0": \
-		"+m" (*(__kernel_fd_set *)(fdsetp)) \
-		: "r" ((int) (fd)))
-
-#undef	__FD_ISSET
-#define __FD_ISSET(fd,fdsetp) \
-	(__extension__ \
-	 ({ \
-	 unsigned char __result; \
-	 asm volatile("btl %1,%2 ; setb %0" \
-		      : "=q" (__result) \
-		      : "r" ((int)(fd)), \
-			"m" (*(__kernel_fd_set *)(fdsetp))); \
-	 __result; \
-}))
-
-#undef	__FD_ZERO
-#define __FD_ZERO(fdsetp) \
-do { \
-	int __d0, __d1; \
-	asm volatile("cld ; rep ; stosl" \
-		     : "=m" (*(__kernel_fd_set *)(fdsetp)), \
-		       "=&c" (__d0), "=&D" (__d1) \
-		     : "a" (0), "1" (__FDSET_LONGS), \
-		       "2" ((__kernel_fd_set *)(fdsetp)) \
-		     : "memory"); \
-} while (0)
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
 
 #endif /* _ASM_X86_POSIX_TYPES_32_H */
diff --git a/arch/x86/include/asm/posix_types_64.h b/arch/x86/include/asm/posix_types_64.h
index eb8d2d92b63..cba0c1ead16 100644
--- a/arch/x86/include/asm/posix_types_64.h
+++ b/arch/x86/include/asm/posix_types_64.h
@@ -7,113 +7,13 @@
  * assume GCC is being used.
  */
 
-typedef unsigned long	__kernel_ino_t;
-typedef unsigned int	__kernel_mode_t;
-typedef unsigned long	__kernel_nlink_t;
-typedef long		__kernel_off_t;
-typedef int		__kernel_pid_t;
-typedef int		__kernel_ipc_pid_t;
-typedef unsigned int	__kernel_uid_t;
-typedef unsigned int	__kernel_gid_t;
-typedef unsigned long	__kernel_size_t;
-typedef long		__kernel_ssize_t;
-typedef long		__kernel_ptrdiff_t;
-typedef long		__kernel_time_t;
-typedef long		__kernel_suseconds_t;
-typedef long		__kernel_clock_t;
-typedef int		__kernel_timer_t;
-typedef int		__kernel_clockid_t;
-typedef int		__kernel_daddr_t;
-typedef char *		__kernel_caddr_t;
-typedef unsigned short	__kernel_uid16_t;
-typedef unsigned short	__kernel_gid16_t;
-
-#ifdef __GNUC__
-typedef long long	__kernel_loff_t;
-#endif
-
-typedef struct {
-	int	val[2];
-} __kernel_fsid_t;
-
 typedef unsigned short __kernel_old_uid_t;
 typedef unsigned short __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
 
 typedef unsigned long	__kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
 
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant cases (8 or 32 longs,
- * for 256 and 1024-bit fd_sets respectively)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *p)
-{
-	unsigned long *tmp = p->fds_bits;
-	int i;
-
-	if (__builtin_constant_p(__FDSET_LONGS)) {
-		switch (__FDSET_LONGS) {
-		case 32:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
-			tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
-			tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
-			tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
-			return;
-		case 16:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			return;
-		case 8:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			return;
-		case 4:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			return;
-		}
-	}
-	i = __FDSET_LONGS;
-	while (i) {
-		i--;
-		*tmp = 0;
-		tmp++;
-	}
-}
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
 
 #endif /* _ASM_X86_POSIX_TYPES_64_H */
diff --git a/arch/x86/include/asm/posix_types_x32.h b/arch/x86/include/asm/posix_types_x32.h
new file mode 100644
index 00000000000..85f9bdafa93
--- /dev/null
+++ b/arch/x86/include/asm/posix_types_x32.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_X86_POSIX_TYPES_X32_H
+#define _ASM_X86_POSIX_TYPES_X32_H
+
+/*
+ * This file is only used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ *
+ * These types should generally match the ones used by the 64-bit kernel,
+ *
+ */
+
+typedef long long __kernel_long_t;
+typedef unsigned long long __kernel_ulong_t;
+#define __kernel_long_t __kernel_long_t
+
+#include <asm/posix_types_64.h>
+
+#endif /* _ASM_X86_POSIX_TYPES_X32_H */
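
[Editor's note: userspace long is 32-bit under x32, but the syscall ABI behind it is the 64-bit one, so kernel "long" fields must stay 8 bytes wide — that is all the __kernel_long_t override above does. A standalone restatement; the typedef mirrors the header, and compiling with gcc -mx32 is an assumption of the sketch:]

	#include <stdio.h>

	typedef long long __kernel_long_t;	/* as in posix_types_x32.h */

	int main(void)
	{
		printf("user long:   %zu bytes\n", sizeof(long));            /* 4 on x32 */
		printf("kernel long: %zu bytes\n", sizeof(__kernel_long_t)); /* always 8 */
		return 0;
	}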
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index aa9088c2693..f6d0d2eb083 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -374,6 +374,8 @@ union thread_xstate {
 };
 
 struct fpu {
+	unsigned int last_cpu;
+	unsigned int has_fpu;
 	union thread_xstate *state;
 };
 
@@ -451,7 +453,7 @@ struct thread_struct {
 	unsigned long		ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
-	unsigned long		trap_no;
+	unsigned long		trap_nr;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
 	struct fpu		fpu;
@@ -924,9 +926,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
 					0xc0000000 : 0xFFFFe000)
 
-#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
+#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
+#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
 
 #define STACK_TOP		TASK_SIZE
@@ -948,6 +950,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);
+
+/*
+ * User space RSP while inside the SYSCALL fast path
+ */
+DECLARE_PER_CPU(unsigned long, old_rsp);
+
 #endif /* CONFIG_X86_64 */
 
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 35664547125..dcfde52979c 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -145,7 +145,6 @@ extern unsigned long
 convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 			 int error_code, int si_code);
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 
 extern long syscall_trace_enter(struct pt_regs *);
 extern void syscall_trace_leave(struct pt_regs *);
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 04459d25e66..4a085383af2 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -230,34 +230,37 @@ struct sigcontext {
  * User-space might still rely on the old definition:
  */
 struct sigcontext {
-	unsigned long r8;
-	unsigned long r9;
-	unsigned long r10;
-	unsigned long r11;
-	unsigned long r12;
-	unsigned long r13;
-	unsigned long r14;
-	unsigned long r15;
-	unsigned long rdi;
-	unsigned long rsi;
-	unsigned long rbp;
-	unsigned long rbx;
-	unsigned long rdx;
-	unsigned long rax;
-	unsigned long rcx;
-	unsigned long rsp;
-	unsigned long rip;
-	unsigned long eflags;		/* RFLAGS */
-	unsigned short cs;
-	unsigned short gs;
-	unsigned short fs;
-	unsigned short __pad0;
-	unsigned long err;
-	unsigned long trapno;
-	unsigned long oldmask;
-	unsigned long cr2;
+	__u64 r8;
+	__u64 r9;
+	__u64 r10;
+	__u64 r11;
+	__u64 r12;
+	__u64 r13;
+	__u64 r14;
+	__u64 r15;
+	__u64 rdi;
+	__u64 rsi;
+	__u64 rbp;
+	__u64 rbx;
+	__u64 rdx;
+	__u64 rax;
+	__u64 rcx;
+	__u64 rsp;
+	__u64 rip;
+	__u64 eflags;		/* RFLAGS */
+	__u16 cs;
+	__u16 gs;
+	__u16 fs;
+	__u16 __pad0;
+	__u64 err;
+	__u64 trapno;
+	__u64 oldmask;
+	__u64 cr2;
 	struct _fpstate __user *fpstate;	/* zero when no FPU context */
-	unsigned long reserved1[8];
+#ifndef __LP64__
+	__u32 __fpstate_pad;
+#endif
+	__u64 reserved1[8];
 };
 #endif /* !__KERNEL__ */
 
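[Editor's note: the #ifndef __LP64__ pad above exists because x32 pointers are 4 bytes; without it the fpstate pointer would pull every later field in by 4 and the x32 struct sigcontext would stop matching the 64-bit kernel's layout. A hypothetical miniature of the struct tail showing the effect:]

	#include <stdint.h>
	#include <stdio.h>

	struct sigcontext_tail {
		uint64_t cr2;
		void *fpstate;		/* 8 bytes on LP64, 4 on x32 */
	#ifndef __LP64__
		uint32_t __fpstate_pad;	/* restores the 64-bit offsets */
	#endif
		uint64_t reserved1[8];
	};

	int main(void)
	{
		/* 8 + 8 + 64 = 80 on LP64 and, thanks to the pad, on x32 too */
		printf("%zu\n", sizeof(struct sigcontext_tail));
		return 0;
	}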
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
index 4e0fe26d27d..7c7c27c97da 100644
--- a/arch/x86/include/asm/sigframe.h
+++ b/arch/x86/include/asm/sigframe.h
@@ -59,12 +59,25 @@ struct rt_sigframe_ia32 {
 #endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */
 
 #ifdef CONFIG_X86_64
+
 struct rt_sigframe {
 	char __user *pretcode;
 	struct ucontext uc;
 	struct siginfo info;
 	/* fp state follows here */
 };
+
+#ifdef CONFIG_X86_X32_ABI
+
+struct rt_sigframe_x32 {
+	u64 pretcode;
+	struct ucontext_x32 uc;
+	compat_siginfo_t info;
+	/* fp state follows here */
+};
+
+#endif /* CONFIG_X86_X32_ABI */
+
 #endif /* CONFIG_X86_64 */
 
 #endif /* _ASM_X86_SIGFRAME_H */
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
new file mode 100644
index 00000000000..ada93b3b8c6
--- /dev/null
+++ b/arch/x86/include/asm/sighandling.h
@@ -0,0 +1,24 @@
+#ifndef _ASM_X86_SIGHANDLING_H
+#define _ASM_X86_SIGHANDLING_H
+
+#include <linux/compiler.h>
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+
+#include <asm/processor-flags.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+#define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
+			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
+			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
+			 X86_EFLAGS_CF)
+
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+		       unsigned long *pax);
+int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+		     struct pt_regs *regs, unsigned long mask);
+
+#endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index cb238526a9f..3fda9db4881 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -10,6 +10,8 @@
 #ifndef _ASM_X86_SYS_IA32_H
 #define _ASM_X86_SYS_IA32_H
 
+#ifdef CONFIG_COMPAT
+
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
@@ -36,8 +38,6 @@ asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
 			      struct sigaction32 __user *, unsigned int);
 asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
 				struct old_sigaction32 __user *);
-asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
-				     compat_sigset_t __user *, unsigned int);
 asmlinkage long sys32_alarm(unsigned int);
 
 asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
@@ -83,4 +83,7 @@ asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
 
 asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
 				    const char __user *);
+
+#endif /* CONFIG_COMPAT */
+
 #endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d962e5652a7..386b78686c4 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -16,6 +16,7 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <asm/asm-offsets.h>	/* For NR_syscalls */
+#include <asm/unistd.h>
 
 extern const unsigned long sys_call_table[];
 
@@ -26,13 +27,13 @@ extern const unsigned long sys_call_table[];
  */
 static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 {
-	return regs->orig_ax;
+	return regs->orig_ax & __SYSCALL_MASK;
 }
 
 static inline void syscall_rollback(struct task_struct *task,
 				    struct pt_regs *regs)
 {
-	regs->ax = regs->orig_ax;
+	regs->ax = regs->orig_ax & __SYSCALL_MASK;
 }
 
 static inline long syscall_get_error(struct task_struct *task,
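
[Editor's note: with CONFIG_X86_X32_ABI the new __SYSCALL_MASK strips the x32 marker bit so consumers of syscall_get_nr() (ptrace, tracing) see the plain syscall number; without x32 the mask is all ones and the AND is a no-op. A hedged restatement — the bit value and the getpid number mirror the x86-64 headers and are restated here as assumptions:]

	#include <stdio.h>

	#define __X32_SYSCALL_BIT 0x40000000UL
	#define __SYSCALL_MASK    (~__X32_SYSCALL_BIT)	/* CONFIG_X86_X32_ABI=y case */

	int main(void)
	{
		unsigned long orig_ax = __X32_SYSCALL_BIT | 39;	/* x32 getpid() entry */
		printf("nr = %lu\n", orig_ax & __SYSCALL_MASK);	/* prints 39 */
		return 0;
	}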
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index bc817cd8b44..ad6df8ccd71 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -86,7 +86,7 @@ struct thread_info {
86#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ 86#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
87#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ 87#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
88#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 88#define TIF_NOTSC 16 /* TSC is not accessible in userland */
89#define TIF_IA32 17 /* 32bit process */ 89#define TIF_IA32 17 /* IA32 compatibility process */
90#define TIF_FORK 18 /* ret_from_fork */ 90#define TIF_FORK 18 /* ret_from_fork */
91#define TIF_MEMDIE 20 /* is terminating due to OOM killer */ 91#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
92#define TIF_DEBUG 21 /* uses debug registers */ 92#define TIF_DEBUG 21 /* uses debug registers */
@@ -95,6 +95,8 @@ struct thread_info {
95#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ 95#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
96#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ 96#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
97#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ 97#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
98#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
99#define TIF_X32 30 /* 32-bit native x86-64 binary */
98 100
99#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 101#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
100#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 102#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -116,6 +118,8 @@ struct thread_info {
116#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) 118#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
117#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) 119#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
118#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) 120#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
121#define _TIF_ADDR32 (1 << TIF_ADDR32)
122#define _TIF_X32 (1 << TIF_X32)
119 123
120/* work to do in syscall_trace_enter() */ 124/* work to do in syscall_trace_enter() */
121#define _TIF_WORK_SYSCALL_ENTRY \ 125#define _TIF_WORK_SYSCALL_ENTRY \
@@ -247,8 +251,6 @@ static inline struct thread_info *current_thread_info(void)
247 * ever touches our thread-synchronous status, so we don't 251 * ever touches our thread-synchronous status, so we don't
248 * have to worry about atomic accesses. 252 * have to worry about atomic accesses.
249 */ 253 */
250#define TS_USEDFPU 0x0001 /* FPU was used by this task
251 this quantum (SMP) */
252#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ 254#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
253#define TS_POLLING 0x0004 /* idle task polling need_resched, 255#define TS_POLLING 0x0004 /* idle task polling need_resched,
254 skip sending interrupt */ 256 skip sending interrupt */
@@ -264,6 +266,18 @@ static inline void set_restore_sigmask(void)
264 ti->status |= TS_RESTORE_SIGMASK; 266 ti->status |= TS_RESTORE_SIGMASK;
265 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); 267 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
266} 268}
269
270static inline bool is_ia32_task(void)
271{
272#ifdef CONFIG_X86_32
273 return true;
274#endif
275#ifdef CONFIG_IA32_EMULATION
276 if (current_thread_info()->status & TS_COMPAT)
277 return true;
278#endif
279 return false;
280}
267#endif /* !__ASSEMBLY__ */ 281#endif /* !__ASSEMBLY__ */
268 282
269#ifndef __ASSEMBLY__ 283#ifndef __ASSEMBLY__
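Note: is_ia32_task() answers "is the current task in a legacy 32-bit syscall?" — always true on 32-bit kernels, true under IA32 emulation while TS_COMPAT is set, and false for x32 tasks, which clear TS_COMPAT (see the set_personality_ia32() hunk below). A hedched sketch of the resulting three-way dispatch; the helper name is hypothetical:

    /* Sketch only, assuming a caller in 64-bit compat code. */
    static void describe_abi(void)
    {
            if (is_ia32_task())
                    pr_debug("legacy IA32: compat structures\n");
            else if (test_thread_flag(TIF_X32))
                    pr_debug("x32: 64-bit regs, 32-bit pointers\n");
            else
                    pr_debug("native 64-bit\n");
    }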
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 0012d0902c5..88eae2aec61 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -89,4 +89,29 @@ asmlinkage void smp_thermal_interrupt(void);
89asmlinkage void mce_threshold_interrupt(void); 89asmlinkage void mce_threshold_interrupt(void);
90#endif 90#endif
91 91
92/* Interrupts/Exceptions */
93enum {
94 X86_TRAP_DE = 0, /* 0, Divide-by-zero */
95 X86_TRAP_DB, /* 1, Debug */
96 X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
97 X86_TRAP_BP, /* 3, Breakpoint */
98 X86_TRAP_OF, /* 4, Overflow */
99 X86_TRAP_BR, /* 5, Bound Range Exceeded */
100 X86_TRAP_UD, /* 6, Invalid Opcode */
101 X86_TRAP_NM, /* 7, Device Not Available */
102 X86_TRAP_DF, /* 8, Double Fault */
103 X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
104 X86_TRAP_TS, /* 10, Invalid TSS */
105 X86_TRAP_NP, /* 11, Segment Not Present */
106 X86_TRAP_SS, /* 12, Stack Segment Fault */
107 X86_TRAP_GP, /* 13, General Protection Fault */
108 X86_TRAP_PF, /* 14, Page Fault */
109 X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
110 X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
111 X86_TRAP_AC, /* 17, Alignment Check */
112 X86_TRAP_MC, /* 18, Machine Check */
113 X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
114 X86_TRAP_IRET = 32, /* 32, IRET Exception */
115};
116
92#endif /* _ASM_X86_TRAPS_H */ 117#endif /* _ASM_X86_TRAPS_H */
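Note: the enum only attaches symbolic names to the architectural vector numbers; the traps.c hunks below then replace magic constants such as 12 and 16. A standalone check of the numbering, copying the enum as shown:

    #include <assert.h>

    enum { X86_TRAP_DE = 0, X86_TRAP_DB, X86_TRAP_NMI, X86_TRAP_BP,
           X86_TRAP_OF, X86_TRAP_BR, X86_TRAP_UD, X86_TRAP_NM,
           X86_TRAP_DF, X86_TRAP_OLD_MF, X86_TRAP_TS, X86_TRAP_NP,
           X86_TRAP_SS, X86_TRAP_GP, X86_TRAP_PF, X86_TRAP_SPURIOUS,
           X86_TRAP_MF, X86_TRAP_AC, X86_TRAP_MC, X86_TRAP_XF };

    int main(void)
    {
            assert(X86_TRAP_SS == 12 && X86_TRAP_MF == 16 && X86_TRAP_XF == 19);
            return 0;
    }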
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 21f77b89e47..37cdc9d99bb 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -1,7 +1,17 @@
1#ifndef _ASM_X86_UNISTD_H 1#ifndef _ASM_X86_UNISTD_H
2#define _ASM_X86_UNISTD_H 1 2#define _ASM_X86_UNISTD_H 1
3 3
4/* x32 syscall flag bit */
5#define __X32_SYSCALL_BIT 0x40000000
6
4#ifdef __KERNEL__ 7#ifdef __KERNEL__
8
9# ifdef CONFIG_X86_X32_ABI
10# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
11# else
12# define __SYSCALL_MASK (~0)
13# endif
14
5# ifdef CONFIG_X86_32 15# ifdef CONFIG_X86_32
6 16
7# include <asm/unistd_32.h> 17# include <asm/unistd_32.h>
@@ -14,6 +24,7 @@
14# else 24# else
15 25
16# include <asm/unistd_64.h> 26# include <asm/unistd_64.h>
27# include <asm/unistd_64_x32.h>
17# define __ARCH_WANT_COMPAT_SYS_TIME 28# define __ARCH_WANT_COMPAT_SYS_TIME
18 29
19# endif 30# endif
@@ -52,8 +63,10 @@
52#else 63#else
53# ifdef __i386__ 64# ifdef __i386__
54# include <asm/unistd_32.h> 65# include <asm/unistd_32.h>
55# else 66# elif defined(__LP64__)
56# include <asm/unistd_64.h> 67# include <asm/unistd_64.h>
68# else
69# include <asm/unistd_x32.h>
57# endif 70# endif
58#endif 71#endif
59 72
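Note: __X32_SYSCALL_BIT sits outside the __KERNEL__ guard on purpose, so user space can form x32 syscall numbers directly. A minimal sketch of a raw invocation on an x32-enabled kernel (assumes write(2) is slot 1 of the shared 64-bit table; error handling omitted):

    #include <unistd.h>
    #include <sys/syscall.h>

    #define __X32_SYSCALL_BIT 0x40000000

    int main(void)
    {
            /* The entry code masks the marker bit off and dispatches
             * through the common 64-bit table (see entry_64.S below). */
            syscall(__X32_SYSCALL_BIT | 1 /* write */, 1, "x32\n", 4);
            return 0;
    }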
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 834e897b1e2..1b4754f82ba 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -1,6 +1,12 @@
1#include <asm/ia32.h> 1#include <asm/ia32.h>
2 2
3#define __SYSCALL_64(nr, sym, compat) [nr] = 1, 3#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
4#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
5#ifdef CONFIG_X86_X32_ABI
6# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
7#else
8# define __SYSCALL_X32(nr, sym, compat) /* nothing */
9#endif
4static char syscalls_64[] = { 10static char syscalls_64[] = {
5#include <asm/syscalls_64.h> 11#include <asm/syscalls_64.h>
6}; 12};
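Note: these macros do not build a real table here — every entry expands to "[nr] = 1,", so sizeof(syscalls_64) is the highest used syscall number plus one, which asm-offsets exports as NR_syscalls. A compilable illustration of the sizing trick (the three entries are illustrative):

    #define __SYSCALL_64(nr, sym, compat) [nr] = 1,

    static char syscalls_64[] = {
            __SYSCALL_64(0, sys_read, sys_read)
            __SYSCALL_64(1, sys_write, sys_write)
            __SYSCALL_64(59, sys_execve, stub_execve)
    };
    /* sizeof(syscalls_64) == 60, i.e. highest nr + 1 */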
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d43cad74f16..c0f7d68d318 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1044,6 +1044,9 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
1044 1044
1045DEFINE_PER_CPU(unsigned int, irq_count) = -1; 1045DEFINE_PER_CPU(unsigned int, irq_count) = -1;
1046 1046
1047DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
1048EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
1049
1047/* 1050/*
1048 * Special IST stacks which the CPU switches to when it calls 1051 * Special IST stacks which the CPU switches to when it calls
1049 * an IST-marked descriptor entry. Up to 7 stacks (hardware 1052 * an IST-marked descriptor entry. Up to 7 stacks (hardware
@@ -1111,6 +1114,8 @@ void debug_stack_reset(void)
1111 1114
1112DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; 1115DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1113EXPORT_PER_CPU_SYMBOL(current_task); 1116EXPORT_PER_CPU_SYMBOL(current_task);
1117DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
1118EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
1114 1119
1115#ifdef CONFIG_CC_STACKPROTECTOR 1120#ifdef CONFIG_CC_STACKPROTECTOR
1116DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); 1121DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6b45e5e7a90..73d08ed98a6 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
326 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; 326 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
327} 327}
328 328
329static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, 329static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
330 int index)
331{ 330{
332 int node; 331 int node;
333 332
@@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
725#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) 724#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
726 725
727#ifdef CONFIG_SMP 726#ifdef CONFIG_SMP
728static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 727
728static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
729{ 729{
730 struct _cpuid4_info *this_leaf, *sibling_leaf; 730 struct _cpuid4_info *this_leaf;
731 unsigned long num_threads_sharing; 731 int ret, i, sibling;
732 int index_msb, i, sibling;
733 struct cpuinfo_x86 *c = &cpu_data(cpu); 732 struct cpuinfo_x86 *c = &cpu_data(cpu);
734 733
735 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { 734 ret = 0;
735 if (index == 3) {
736 ret = 1;
736 for_each_cpu(i, cpu_llc_shared_mask(cpu)) { 737 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
737 if (!per_cpu(ici_cpuid4_info, i)) 738 if (!per_cpu(ici_cpuid4_info, i))
738 continue; 739 continue;
@@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
743 set_bit(sibling, this_leaf->shared_cpu_map); 744 set_bit(sibling, this_leaf->shared_cpu_map);
744 } 745 }
745 } 746 }
746 return; 747 } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
748 ret = 1;
749 for_each_cpu(i, cpu_sibling_mask(cpu)) {
750 if (!per_cpu(ici_cpuid4_info, i))
751 continue;
752 this_leaf = CPUID4_INFO_IDX(i, index);
753 for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
754 if (!cpu_online(sibling))
755 continue;
756 set_bit(sibling, this_leaf->shared_cpu_map);
757 }
758 }
747 } 759 }
760
761 return ret;
762}
763
764static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
765{
766 struct _cpuid4_info *this_leaf, *sibling_leaf;
767 unsigned long num_threads_sharing;
768 int index_msb, i;
769 struct cpuinfo_x86 *c = &cpu_data(cpu);
770
771 if (c->x86_vendor == X86_VENDOR_AMD) {
772 if (cache_shared_amd_cpu_map_setup(cpu, index))
773 return;
774 }
775
748 this_leaf = CPUID4_INFO_IDX(cpu, index); 776 this_leaf = CPUID4_INFO_IDX(cpu, index);
749 num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; 777 num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
750 778
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 786e76a8632..e4eeaaf58a4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -528,6 +528,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
528 528
529 sprintf(name, "threshold_bank%i", bank); 529 sprintf(name, "threshold_bank%i", bank);
530 530
531#ifdef CONFIG_SMP
531 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ 532 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
532 i = cpumask_first(cpu_llc_shared_mask(cpu)); 533 i = cpumask_first(cpu_llc_shared_mask(cpu));
533 534
@@ -553,6 +554,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
553 554
554 goto out; 555 goto out;
555 } 556 }
557#endif
556 558
557 b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); 559 b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
558 if (!b) { 560 if (!b) {
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 79289632cb2..a041e094b8b 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -167,6 +167,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
167{ 167{
168 int err = 0; 168 int err = 0;
169 mtrr_type type; 169 mtrr_type type;
170 unsigned long base;
170 unsigned long size; 171 unsigned long size;
171 struct mtrr_sentry sentry; 172 struct mtrr_sentry sentry;
172 struct mtrr_gentry gentry; 173 struct mtrr_gentry gentry;
@@ -267,14 +268,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
267#endif 268#endif
268 if (gentry.regnum >= num_var_ranges) 269 if (gentry.regnum >= num_var_ranges)
269 return -EINVAL; 270 return -EINVAL;
270 mtrr_if->get(gentry.regnum, &gentry.base, &size, &type); 271 mtrr_if->get(gentry.regnum, &base, &size, &type);
271 272
272 /* Hide entries that go above 4GB */ 273 /* Hide entries that go above 4GB */
273 if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)) 274 if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
274 || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))) 275 || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
275 gentry.base = gentry.size = gentry.type = 0; 276 gentry.base = gentry.size = gentry.type = 0;
276 else { 277 else {
277 gentry.base <<= PAGE_SHIFT; 278 gentry.base = base << PAGE_SHIFT;
278 gentry.size = size << PAGE_SHIFT; 279 gentry.size = size << PAGE_SHIFT;
279 gentry.type = type; 280 gentry.type = type;
280 } 281 }
@@ -321,11 +322,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
321#endif 322#endif
322 if (gentry.regnum >= num_var_ranges) 323 if (gentry.regnum >= num_var_ranges)
323 return -EINVAL; 324 return -EINVAL;
324 mtrr_if->get(gentry.regnum, &gentry.base, &size, &type); 325 mtrr_if->get(gentry.regnum, &base, &size, &type);
325 /* Hide entries that would overflow */ 326 /* Hide entries that would overflow */
326 if (size != (__typeof__(gentry.size))size) 327 if (size != (__typeof__(gentry.size))size)
327 gentry.base = gentry.size = gentry.type = 0; 328 gentry.base = gentry.size = gentry.type = 0;
328 else { 329 else {
330 gentry.base = base;
329 gentry.size = size; 331 gentry.size = size;
330 gentry.type = type; 332 gentry.type = type;
331 } 333 }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3c44b712380..1c52bdbb9b8 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,7 +29,6 @@
29#include <asm/apic.h> 29#include <asm/apic.h>
30#include <asm/stacktrace.h> 30#include <asm/stacktrace.h>
31#include <asm/nmi.h> 31#include <asm/nmi.h>
32#include <asm/compat.h>
33#include <asm/smp.h> 32#include <asm/smp.h>
34#include <asm/alternative.h> 33#include <asm/alternative.h>
35#include <asm/timer.h> 34#include <asm/timer.h>
@@ -988,6 +987,9 @@ static void x86_pmu_start(struct perf_event *event, int flags)
988 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 987 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
989 int idx = event->hw.idx; 988 int idx = event->hw.idx;
990 989
990 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
991 return;
992
991 if (WARN_ON_ONCE(idx == -1)) 993 if (WARN_ON_ONCE(idx == -1))
992 return; 994 return;
993 995
@@ -1674,6 +1676,9 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1674} 1676}
1675 1677
1676#ifdef CONFIG_COMPAT 1678#ifdef CONFIG_COMPAT
1679
1680#include <asm/compat.h>
1681
1677static inline int 1682static inline int
1678perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) 1683perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1679{ 1684{
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 513d617b93c..82db83b5c3b 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -147,7 +147,9 @@ struct cpu_hw_events {
147 /* 147 /*
148 * AMD specific bits 148 * AMD specific bits
149 */ 149 */
150 struct amd_nb *amd_nb; 150 struct amd_nb *amd_nb;
151 /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
152 u64 perf_ctr_virt_mask;
151 153
152 void *kfree_on_online; 154 void *kfree_on_online;
153}; 155};
@@ -425,9 +427,11 @@ void x86_pmu_disable_all(void);
425static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, 427static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
426 u64 enable_mask) 428 u64 enable_mask)
427{ 429{
430 u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
431
428 if (hwc->extra_reg.reg) 432 if (hwc->extra_reg.reg)
429 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); 433 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
430 wrmsrl(hwc->config_base, hwc->config | enable_mask); 434 wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
431} 435}
432 436
433void x86_pmu_enable_all(int added); 437void x86_pmu_enable_all(int added);
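Note: perf_ctr_virt_mask is an inverted mask — bits set in it are forced off in every event-select write, so with the default of the Host-Only bit (see the AMD hunk below) host-only counting stays suppressed until KVM turns it on. A standalone sketch of the bit math, assuming Host-Only is bit 41 of the AMD event-select MSR:

    #include <stdio.h>
    #include <stdint.h>

    #define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41)  /* assumed */
    #define ARCH_PERFMON_EVENTSEL_ENABLE  (1ULL << 22)

    int main(void)
    {
            uint64_t config = AMD_PERFMON_EVENTSEL_HOSTONLY | 0x76; /* cycles */
            uint64_t disable_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
            uint64_t val = (config | ARCH_PERFMON_EVENTSEL_ENABLE) & ~disable_mask;
            /* Host-Only ends up cleared in the value written to the MSR. */
            printf("HO bit: %d\n", !!(val & AMD_PERFMON_EVENTSEL_HOSTONLY));
            return 0;
    }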
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 0397b23be8e..67250a52430 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,5 @@
1#include <linux/perf_event.h> 1#include <linux/perf_event.h>
2#include <linux/export.h>
2#include <linux/types.h> 3#include <linux/types.h>
3#include <linux/init.h> 4#include <linux/init.h>
4#include <linux/slab.h> 5#include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
357 struct amd_nb *nb; 358 struct amd_nb *nb;
358 int i, nb_id; 359 int i, nb_id;
359 360
360 if (boot_cpu_data.x86_max_cores < 2) 361 cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
362
363 if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
361 return; 364 return;
362 365
363 nb_id = amd_get_nb_id(cpu); 366 nb_id = amd_get_nb_id(cpu);
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
587 .put_event_constraints = amd_put_event_constraints, 590 .put_event_constraints = amd_put_event_constraints,
588 591
589 .cpu_prepare = amd_pmu_cpu_prepare, 592 .cpu_prepare = amd_pmu_cpu_prepare,
590 .cpu_starting = amd_pmu_cpu_starting,
591 .cpu_dead = amd_pmu_cpu_dead, 593 .cpu_dead = amd_pmu_cpu_dead,
592#endif 594#endif
595 .cpu_starting = amd_pmu_cpu_starting,
593}; 596};
594 597
595__init int amd_pmu_init(void) 598__init int amd_pmu_init(void)
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
621 624
622 return 0; 625 return 0;
623} 626}
627
628void amd_pmu_enable_virt(void)
629{
630 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
631
632 cpuc->perf_ctr_virt_mask = 0;
633
634 /* Reload all events */
635 x86_pmu_disable_all();
636 x86_pmu_enable_all(0);
637}
638EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
639
640void amd_pmu_disable_virt(void)
641{
642 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
643
644 /*
645 * We only mask out the Host-only bit so that host-only counting works
646 * when SVM is disabled. If someone sets up a guest-only counter when
 647 * SVM is disabled, the Guest-only bit still gets set and the counter
648 * will not count anything.
649 */
650 cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
651
652 /* Reload all events */
653 x86_pmu_disable_all();
654 x86_pmu_enable_all(0);
655}
656EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
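Note: the two exported helpers are intended to bracket per-CPU SVM enable/disable in KVM, so guest/host-only filtering is only relaxed while virtualization is actually usable. A sketch of the expected call sites (function names are assumptions; the real svm.c hunk is elsewhere in this series):

    /* sketch, not the actual svm.c change */
    static void svm_cpu_up_sketch(void)
    {
            /* ... set EFER.SVME, program the host save area ... */
            amd_pmu_enable_virt();   /* clear the Host-Only filter */
    }

    static void svm_cpu_down_sketch(void)
    {
            amd_pmu_disable_virt();  /* re-mask Host-Only counting */
            /* ... clear EFER.SVME ... */
    }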
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 4025fe4f928..28f98706b08 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -265,7 +265,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
265#endif 265#endif
266 printk("\n"); 266 printk("\n");
267 if (notify_die(DIE_OOPS, str, regs, err, 267 if (notify_die(DIE_OOPS, str, regs, err,
268 current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) 268 current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
269 return 1; 269 return 1;
270 270
271 show_registers(regs); 271 show_registers(regs);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 3fe8239fd8f..2925e14fb1d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -482,7 +482,12 @@ GLOBAL(system_call_after_swapgs)
482 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) 482 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
483 jnz tracesys 483 jnz tracesys
484system_call_fastpath: 484system_call_fastpath:
485#if __SYSCALL_MASK == ~0
485 cmpq $__NR_syscall_max,%rax 486 cmpq $__NR_syscall_max,%rax
487#else
488 andl $__SYSCALL_MASK,%eax
489 cmpl $__NR_syscall_max,%eax
490#endif
486 ja badsys 491 ja badsys
487 movq %r10,%rcx 492 movq %r10,%rcx
488 call *sys_call_table(,%rax,8) # XXX: rip relative 493 call *sys_call_table(,%rax,8) # XXX: rip relative
@@ -596,7 +601,12 @@ tracesys:
596 */ 601 */
597 LOAD_ARGS ARGOFFSET, 1 602 LOAD_ARGS ARGOFFSET, 1
598 RESTORE_REST 603 RESTORE_REST
604#if __SYSCALL_MASK == ~0
599 cmpq $__NR_syscall_max,%rax 605 cmpq $__NR_syscall_max,%rax
606#else
607 andl $__SYSCALL_MASK,%eax
608 cmpl $__NR_syscall_max,%eax
609#endif
600 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ 610 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
601 movq %r10,%rcx /* fixup for C */ 611 movq %r10,%rcx /* fixup for C */
602 call *sys_call_table(,%rax,8) 612 call *sys_call_table(,%rax,8)
@@ -736,6 +746,40 @@ ENTRY(stub_rt_sigreturn)
736 CFI_ENDPROC 746 CFI_ENDPROC
737END(stub_rt_sigreturn) 747END(stub_rt_sigreturn)
738 748
749#ifdef CONFIG_X86_X32_ABI
750 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
751
752ENTRY(stub_x32_rt_sigreturn)
753 CFI_STARTPROC
754 addq $8, %rsp
755 PARTIAL_FRAME 0
756 SAVE_REST
757 movq %rsp,%rdi
758 FIXUP_TOP_OF_STACK %r11
759 call sys32_x32_rt_sigreturn
760 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
761 RESTORE_REST
762 jmp int_ret_from_sys_call
763 CFI_ENDPROC
764END(stub_x32_rt_sigreturn)
765
766ENTRY(stub_x32_execve)
767 CFI_STARTPROC
768 addq $8, %rsp
769 PARTIAL_FRAME 0
770 SAVE_REST
771 FIXUP_TOP_OF_STACK %r11
772 movq %rsp, %rcx
773 call sys32_execve
774 RESTORE_TOP_OF_STACK %r11
775 movq %rax,RAX(%rsp)
776 RESTORE_REST
777 jmp int_ret_from_sys_call
778 CFI_ENDPROC
779END(stub_x32_execve)
780
781#endif
782
739/* 783/*
740 * Build the entry stubs and pointer table with some assembler magic. 784 * Build the entry stubs and pointer table with some assembler magic.
741 * We pack 7 stubs into a single 32-byte chunk, which will fit in a 785 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
@@ -1532,10 +1576,17 @@ ENTRY(nmi)
1532 pushq_cfi %rdx 1576 pushq_cfi %rdx
1533 1577
1534 /* 1578 /*
1579 * If %cs was not the kernel segment, then the NMI triggered in user
1580 * space, which means it is definitely not nested.
1581 */
1582 cmpl $__KERNEL_CS, 16(%rsp)
1583 jne first_nmi
1584
1585 /*
1535 * Check the special variable on the stack to see if NMIs are 1586 * Check the special variable on the stack to see if NMIs are
1536 * executing. 1587 * executing.
1537 */ 1588 */
1538 cmp $1, -8(%rsp) 1589 cmpl $1, -8(%rsp)
1539 je nested_nmi 1590 je nested_nmi
1540 1591
1541 /* 1592 /*
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 313fb5cddbc..7b77062dea1 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -61,7 +61,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
61 outb(0, 0xF0); 61 outb(0, 0xF0);
62 if (ignore_fpu_irq || !boot_cpu_data.hard_math) 62 if (ignore_fpu_irq || !boot_cpu_data.hard_math)
63 return IRQ_NONE; 63 return IRQ_NONE;
64 math_error(get_irq_regs(), 0, 16); 64 math_error(get_irq_regs(), 0, X86_TRAP_MF);
65 return IRQ_HANDLED; 65 return IRQ_HANDLED;
66} 66}
67 67
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index ac0417be913..73465aab28f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -360,7 +360,6 @@ out:
360static enum ucode_state 360static enum ucode_state
361request_microcode_user(int cpu, const void __user *buf, size_t size) 361request_microcode_user(int cpu, const void __user *buf, size_t size)
362{ 362{
363 pr_info("AMD microcode update via /dev/cpu/microcode not supported\n");
364 return UCODE_ERROR; 363 return UCODE_ERROR;
365} 364}
366 365
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 485204f58cd..c08d1ff12b7 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -214,6 +214,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
214 214
215 task_user_gs(p) = get_user_gs(regs); 215 task_user_gs(p) = get_user_gs(regs);
216 216
217 p->fpu_counter = 0;
217 p->thread.io_bitmap_ptr = NULL; 218 p->thread.io_bitmap_ptr = NULL;
218 tsk = current; 219 tsk = current;
219 err = -ENOMEM; 220 err = -ENOMEM;
@@ -299,22 +300,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
299 *next = &next_p->thread; 300 *next = &next_p->thread;
300 int cpu = smp_processor_id(); 301 int cpu = smp_processor_id();
301 struct tss_struct *tss = &per_cpu(init_tss, cpu); 302 struct tss_struct *tss = &per_cpu(init_tss, cpu);
302 bool preload_fpu; 303 fpu_switch_t fpu;
303 304
304 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ 305 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
305 306
306 /* 307 fpu = switch_fpu_prepare(prev_p, next_p, cpu);
307 * If the task has used fpu the last 5 timeslices, just do a full
308 * restore of the math state immediately to avoid the trap; the
309 * chances of needing FPU soon are obviously high now
310 */
311 preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
312
313 __unlazy_fpu(prev_p);
314
315 /* we're going to use this soon, after a few expensive things */
316 if (preload_fpu)
317 prefetch(next->fpu.state);
318 308
319 /* 309 /*
320 * Reload esp0. 310 * Reload esp0.
@@ -354,11 +344,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
354 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) 344 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
355 __switch_to_xtra(prev_p, next_p, tss); 345 __switch_to_xtra(prev_p, next_p, tss);
356 346
357 /* If we're going to preload the fpu context, make sure clts
358 is run while we're batching the cpu state updates. */
359 if (preload_fpu)
360 clts();
361
362 /* 347 /*
363 * Leave lazy mode, flushing any hypercalls made here. 348 * Leave lazy mode, flushing any hypercalls made here.
364 * This must be done before restoring TLS segments so 349 * This must be done before restoring TLS segments so
@@ -368,15 +353,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
368 */ 353 */
369 arch_end_context_switch(next_p); 354 arch_end_context_switch(next_p);
370 355
371 if (preload_fpu)
372 __math_state_restore();
373
374 /* 356 /*
375 * Restore %gs if needed (which is common) 357 * Restore %gs if needed (which is common)
376 */ 358 */
377 if (prev->gs | next->gs) 359 if (prev->gs | next->gs)
378 lazy_load_gs(next->gs); 360 lazy_load_gs(next->gs);
379 361
362 switch_fpu_finish(next_p, fpu);
363
380 percpu_write(current_task, next_p); 364 percpu_write(current_task, next_p);
381 365
382 return prev_p; 366 return prev_p;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c8..550e77b1b94 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -286,6 +286,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
286 286
287 set_tsk_thread_flag(p, TIF_FORK); 287 set_tsk_thread_flag(p, TIF_FORK);
288 288
289 p->fpu_counter = 0;
289 p->thread.io_bitmap_ptr = NULL; 290 p->thread.io_bitmap_ptr = NULL;
290 291
291 savesegment(gs, p->thread.gsindex); 292 savesegment(gs, p->thread.gsindex);
@@ -364,7 +365,9 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
364void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp) 365void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
365{ 366{
366 start_thread_common(regs, new_ip, new_sp, 367 start_thread_common(regs, new_ip, new_sp,
367 __USER32_CS, __USER32_DS, __USER32_DS); 368 test_thread_flag(TIF_X32)
369 ? __USER_CS : __USER32_CS,
370 __USER_DS, __USER_DS);
368} 371}
369#endif 372#endif
370 373
@@ -386,18 +389,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
386 int cpu = smp_processor_id(); 389 int cpu = smp_processor_id();
387 struct tss_struct *tss = &per_cpu(init_tss, cpu); 390 struct tss_struct *tss = &per_cpu(init_tss, cpu);
388 unsigned fsindex, gsindex; 391 unsigned fsindex, gsindex;
389 bool preload_fpu; 392 fpu_switch_t fpu;
390 393
391 /* 394 fpu = switch_fpu_prepare(prev_p, next_p, cpu);
392 * If the task has used fpu the last 5 timeslices, just do a full
393 * restore of the math state immediately to avoid the trap; the
394 * chances of needing FPU soon are obviously high now
395 */
396 preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
397
398 /* we're going to use this soon, after a few expensive things */
399 if (preload_fpu)
400 prefetch(next->fpu.state);
401 395
402 /* 396 /*
403 * Reload esp0, LDT and the page table pointer: 397 * Reload esp0, LDT and the page table pointer:
@@ -427,13 +421,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
427 421
428 load_TLS(next, cpu); 422 load_TLS(next, cpu);
429 423
430 /* Must be after DS reload */
431 __unlazy_fpu(prev_p);
432
433 /* Make sure cpu is ready for new context */
434 if (preload_fpu)
435 clts();
436
437 /* 424 /*
438 * Leave lazy mode, flushing any hypercalls made here. 425 * Leave lazy mode, flushing any hypercalls made here.
439 * This must be done before restoring TLS segments so 426 * This must be done before restoring TLS segments so
@@ -474,6 +461,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
474 wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 461 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
475 prev->gsindex = gsindex; 462 prev->gsindex = gsindex;
476 463
464 switch_fpu_finish(next_p, fpu);
465
477 /* 466 /*
478 * Switch the PDA and FPU contexts. 467 * Switch the PDA and FPU contexts.
479 */ 468 */
@@ -492,13 +481,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
492 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) 481 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
493 __switch_to_xtra(prev_p, next_p, tss); 482 __switch_to_xtra(prev_p, next_p, tss);
494 483
495 /*
496 * Preload the FPU context, now that we've determined that the
497 * task is likely to be using it.
498 */
499 if (preload_fpu)
500 __math_state_restore();
501
502 return prev_p; 484 return prev_p;
503} 485}
504 486
@@ -508,6 +490,8 @@ void set_personality_64bit(void)
508 490
509 /* Make sure to be in 64bit mode */ 491 /* Make sure to be in 64bit mode */
510 clear_thread_flag(TIF_IA32); 492 clear_thread_flag(TIF_IA32);
493 clear_thread_flag(TIF_ADDR32);
494 clear_thread_flag(TIF_X32);
511 495
512 /* Ensure the corresponding mm is not marked. */ 496 /* Ensure the corresponding mm is not marked. */
513 if (current->mm) 497 if (current->mm)
@@ -520,20 +504,31 @@ void set_personality_64bit(void)
520 current->personality &= ~READ_IMPLIES_EXEC; 504 current->personality &= ~READ_IMPLIES_EXEC;
521} 505}
522 506
523void set_personality_ia32(void) 507void set_personality_ia32(bool x32)
524{ 508{
525 /* inherit personality from parent */ 509 /* inherit personality from parent */
526 510
527 /* Make sure to be in 32bit mode */ 511 /* Make sure to be in 32bit mode */
528 set_thread_flag(TIF_IA32); 512 set_thread_flag(TIF_ADDR32);
529 current->personality |= force_personality32;
530 513
531 /* Mark the associated mm as containing 32-bit tasks. */ 514 /* Mark the associated mm as containing 32-bit tasks. */
532 if (current->mm) 515 if (current->mm)
533 current->mm->context.ia32_compat = 1; 516 current->mm->context.ia32_compat = 1;
534 517
535 /* Prepare the first "return" to user space */ 518 if (x32) {
536 current_thread_info()->status |= TS_COMPAT; 519 clear_thread_flag(TIF_IA32);
520 set_thread_flag(TIF_X32);
521 current->personality &= ~READ_IMPLIES_EXEC;
522 /* is_compat_task() uses the presence of the x32
523 syscall bit flag to determine compat status */
524 current_thread_info()->status &= ~TS_COMPAT;
525 } else {
526 set_thread_flag(TIF_IA32);
527 clear_thread_flag(TIF_X32);
528 current->personality |= force_personality32;
529 /* Prepare the first "return" to user space */
530 current_thread_info()->status |= TS_COMPAT;
531 }
537} 532}
538 533
539unsigned long get_wchan(struct task_struct *p) 534unsigned long get_wchan(struct task_struct *p)
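Note: set_personality_ia32() now covers both compat flavours — x32 keeps the 64-bit register set (TIF_X32 set, TS_COMPAT cleared) while legacy IA32 keeps TS_COMPAT for syscall dispatch. A sketch of the choice the ELF loader is expected to make; the helper is hypothetical and the real SET_PERSONALITY change lives in asm/elf.h:

    /* sketch only */
    static void choose_personality(bool elf_ia32, bool elf_x32)
    {
            if (elf_ia32 || elf_x32)
                    set_personality_ia32(elf_x32);
            else
                    set_personality_64bit();
    }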
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 50267386b76..6fb330adc7c 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -33,6 +33,7 @@
33#include <asm/prctl.h> 33#include <asm/prctl.h>
34#include <asm/proto.h> 34#include <asm/proto.h>
35#include <asm/hw_breakpoint.h> 35#include <asm/hw_breakpoint.h>
36#include <asm/traps.h>
36 37
37#include "tls.h" 38#include "tls.h"
38 39
@@ -1130,6 +1131,100 @@ static int genregs32_set(struct task_struct *target,
1130 return ret; 1131 return ret;
1131} 1132}
1132 1133
1134#ifdef CONFIG_X86_X32_ABI
1135static long x32_arch_ptrace(struct task_struct *child,
1136 compat_long_t request, compat_ulong_t caddr,
1137 compat_ulong_t cdata)
1138{
1139 unsigned long addr = caddr;
1140 unsigned long data = cdata;
1141 void __user *datap = compat_ptr(data);
1142 int ret;
1143
1144 switch (request) {
1145 /* Read 32bits at location addr in the USER area. Only allow
1146 to return the lower 32bits of segment and debug registers. */
1147 case PTRACE_PEEKUSR: {
1148 u32 tmp;
1149
1150 ret = -EIO;
1151 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1152 addr < offsetof(struct user_regs_struct, cs))
1153 break;
1154
1155 tmp = 0; /* Default return condition */
1156 if (addr < sizeof(struct user_regs_struct))
1157 tmp = getreg(child, addr);
1158 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1159 addr <= offsetof(struct user, u_debugreg[7])) {
1160 addr -= offsetof(struct user, u_debugreg[0]);
1161 tmp = ptrace_get_debugreg(child, addr / sizeof(data));
1162 }
1163 ret = put_user(tmp, (__u32 __user *)datap);
1164 break;
1165 }
1166
 1167 /* Write the word at location addr in the USER area. Only allow
 1168 updating segment and debug registers, with the upper 32 bits
 1169 zero-extended. */
1170 case PTRACE_POKEUSR:
1171 ret = -EIO;
1172 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1173 addr < offsetof(struct user_regs_struct, cs))
1174 break;
1175
1176 if (addr < sizeof(struct user_regs_struct))
1177 ret = putreg(child, addr, data);
1178 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1179 addr <= offsetof(struct user, u_debugreg[7])) {
1180 addr -= offsetof(struct user, u_debugreg[0]);
1181 ret = ptrace_set_debugreg(child,
1182 addr / sizeof(data), data);
1183 }
1184 break;
1185
1186 case PTRACE_GETREGS: /* Get all gp regs from the child. */
1187 return copy_regset_to_user(child,
1188 task_user_regset_view(current),
1189 REGSET_GENERAL,
1190 0, sizeof(struct user_regs_struct),
1191 datap);
1192
1193 case PTRACE_SETREGS: /* Set all gp regs in the child. */
1194 return copy_regset_from_user(child,
1195 task_user_regset_view(current),
1196 REGSET_GENERAL,
1197 0, sizeof(struct user_regs_struct),
1198 datap);
1199
1200 case PTRACE_GETFPREGS: /* Get the child FPU state. */
1201 return copy_regset_to_user(child,
1202 task_user_regset_view(current),
1203 REGSET_FP,
1204 0, sizeof(struct user_i387_struct),
1205 datap);
1206
1207 case PTRACE_SETFPREGS: /* Set the child FPU state. */
1208 return copy_regset_from_user(child,
1209 task_user_regset_view(current),
1210 REGSET_FP,
1211 0, sizeof(struct user_i387_struct),
1212 datap);
1213
1214 /* normal 64bit interface to access TLS data.
1215 Works just like arch_prctl, except that the arguments
1216 are reversed. */
1217 case PTRACE_ARCH_PRCTL:
1218 return do_arch_prctl(child, data, addr);
1219
1220 default:
1221 return compat_ptrace_request(child, request, addr, data);
1222 }
1223
1224 return ret;
1225}
1226#endif
1227
1133long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 1228long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1134 compat_ulong_t caddr, compat_ulong_t cdata) 1229 compat_ulong_t caddr, compat_ulong_t cdata)
1135{ 1230{
@@ -1139,6 +1234,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1139 int ret; 1234 int ret;
1140 __u32 val; 1235 __u32 val;
1141 1236
1237#ifdef CONFIG_X86_X32_ABI
1238 if (!is_ia32_task())
1239 return x32_arch_ptrace(child, request, caddr, cdata);
1240#endif
1241
1142 switch (request) { 1242 switch (request) {
1143 case PTRACE_PEEKUSR: 1243 case PTRACE_PEEKUSR:
1144 ret = getreg32(child, addr, &val); 1244 ret = getreg32(child, addr, &val);
@@ -1326,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
1326 int error_code, int si_code, 1426 int error_code, int si_code,
1327 struct siginfo *info) 1427 struct siginfo *info)
1328{ 1428{
1329 tsk->thread.trap_no = 1; 1429 tsk->thread.trap_nr = X86_TRAP_DB;
1330 tsk->thread.error_code = error_code; 1430 tsk->thread.error_code = error_code;
1331 1431
1332 memset(info, 0, sizeof(*info)); 1432 memset(info, 0, sizeof(*info));
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 46a01bdc27e..9c73acc1c86 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -10,10 +10,8 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/signal.h>
14#include <linux/errno.h> 13#include <linux/errno.h>
15#include <linux/wait.h> 14#include <linux/wait.h>
16#include <linux/ptrace.h>
17#include <linux/tracehook.h> 15#include <linux/tracehook.h>
18#include <linux/unistd.h> 16#include <linux/unistd.h>
19#include <linux/stddef.h> 17#include <linux/stddef.h>
@@ -26,10 +24,12 @@
26#include <asm/i387.h> 24#include <asm/i387.h>
27#include <asm/vdso.h> 25#include <asm/vdso.h>
28#include <asm/mce.h> 26#include <asm/mce.h>
27#include <asm/sighandling.h>
29 28
30#ifdef CONFIG_X86_64 29#ifdef CONFIG_X86_64
31#include <asm/proto.h> 30#include <asm/proto.h>
32#include <asm/ia32_unistd.h> 31#include <asm/ia32_unistd.h>
32#include <asm/sys_ia32.h>
33#endif /* CONFIG_X86_64 */ 33#endif /* CONFIG_X86_64 */
34 34
35#include <asm/syscall.h> 35#include <asm/syscall.h>
@@ -37,13 +37,6 @@
37 37
38#include <asm/sigframe.h> 38#include <asm/sigframe.h>
39 39
40#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
41
42#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
43 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
44 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
45 X86_EFLAGS_CF)
46
47#ifdef CONFIG_X86_32 40#ifdef CONFIG_X86_32
48# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) 41# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF)
49#else 42#else
@@ -68,9 +61,8 @@
68 regs->seg = GET_SEG(seg) | 3; \ 61 regs->seg = GET_SEG(seg) | 3; \
69} while (0) 62} while (0)
70 63
71static int 64int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
72restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, 65 unsigned long *pax)
73 unsigned long *pax)
74{ 66{
75 void __user *buf; 67 void __user *buf;
76 unsigned int tmpflags; 68 unsigned int tmpflags;
@@ -125,9 +117,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
125 return err; 117 return err;
126} 118}
127 119
128static int 120int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
129setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, 121 struct pt_regs *regs, unsigned long mask)
130 struct pt_regs *regs, unsigned long mask)
131{ 122{
132 int err = 0; 123 int err = 0;
133 124
@@ -159,7 +150,7 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
159 put_user_ex(regs->r15, &sc->r15); 150 put_user_ex(regs->r15, &sc->r15);
160#endif /* CONFIG_X86_64 */ 151#endif /* CONFIG_X86_64 */
161 152
162 put_user_ex(current->thread.trap_no, &sc->trapno); 153 put_user_ex(current->thread.trap_nr, &sc->trapno);
163 put_user_ex(current->thread.error_code, &sc->err); 154 put_user_ex(current->thread.error_code, &sc->err);
164 put_user_ex(regs->ip, &sc->ip); 155 put_user_ex(regs->ip, &sc->ip);
165#ifdef CONFIG_X86_32 156#ifdef CONFIG_X86_32
@@ -642,6 +633,16 @@ static int signr_convert(int sig)
642#define is_ia32 0 633#define is_ia32 0
643#endif /* CONFIG_IA32_EMULATION */ 634#endif /* CONFIG_IA32_EMULATION */
644 635
636#ifdef CONFIG_X86_X32_ABI
637#define is_x32 test_thread_flag(TIF_X32)
638
639static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
640 siginfo_t *info, compat_sigset_t *set,
641 struct pt_regs *regs);
642#else /* !CONFIG_X86_X32_ABI */
643#define is_x32 0
644#endif /* CONFIG_X86_X32_ABI */
645
645int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 646int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
646 sigset_t *set, struct pt_regs *regs); 647 sigset_t *set, struct pt_regs *regs);
647int ia32_setup_frame(int sig, struct k_sigaction *ka, 648int ia32_setup_frame(int sig, struct k_sigaction *ka,
@@ -666,8 +667,14 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
666 ret = ia32_setup_rt_frame(usig, ka, info, set, regs); 667 ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
667 else 668 else
668 ret = ia32_setup_frame(usig, ka, set, regs); 669 ret = ia32_setup_frame(usig, ka, set, regs);
669 } else 670#ifdef CONFIG_X86_X32_ABI
671 } else if (is_x32) {
672 ret = x32_setup_rt_frame(usig, ka, info,
673 (compat_sigset_t *)set, regs);
674#endif
675 } else {
670 ret = __setup_rt_frame(sig, ka, info, set, regs); 676 ret = __setup_rt_frame(sig, ka, info, set, regs);
677 }
671 678
672 if (ret) { 679 if (ret) {
673 force_sigsegv(sig, current); 680 force_sigsegv(sig, current);
@@ -850,3 +857,102 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
850 857
851 force_sig(SIGSEGV, me); 858 force_sig(SIGSEGV, me);
852} 859}
860
861#ifdef CONFIG_X86_X32_ABI
862static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
863 siginfo_t *info, compat_sigset_t *set,
864 struct pt_regs *regs)
865{
866 struct rt_sigframe_x32 __user *frame;
867 void __user *restorer;
868 int err = 0;
869 void __user *fpstate = NULL;
870
871 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
872
873 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
874 return -EFAULT;
875
876 if (ka->sa.sa_flags & SA_SIGINFO) {
877 if (copy_siginfo_to_user32(&frame->info, info))
878 return -EFAULT;
879 }
880
881 put_user_try {
882 /* Create the ucontext. */
883 if (cpu_has_xsave)
884 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
885 else
886 put_user_ex(0, &frame->uc.uc_flags);
887 put_user_ex(0, &frame->uc.uc_link);
888 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
889 put_user_ex(sas_ss_flags(regs->sp),
890 &frame->uc.uc_stack.ss_flags);
891 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
892 put_user_ex(0, &frame->uc.uc__pad0);
893 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
894 regs, set->sig[0]);
895 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
896
897 if (ka->sa.sa_flags & SA_RESTORER) {
898 restorer = ka->sa.sa_restorer;
899 } else {
900 /* could use a vstub here */
901 restorer = NULL;
902 err |= -EFAULT;
903 }
904 put_user_ex(restorer, &frame->pretcode);
905 } put_user_catch(err);
906
907 if (err)
908 return -EFAULT;
909
910 /* Set up registers for signal handler */
911 regs->sp = (unsigned long) frame;
912 regs->ip = (unsigned long) ka->sa.sa_handler;
913
914 /* We use the x32 calling convention here... */
915 regs->di = sig;
916 regs->si = (unsigned long) &frame->info;
917 regs->dx = (unsigned long) &frame->uc;
918
919 loadsegment(ds, __USER_DS);
920 loadsegment(es, __USER_DS);
921
922 regs->cs = __USER_CS;
923 regs->ss = __USER_DS;
924
925 return 0;
926}
927
928asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
929{
930 struct rt_sigframe_x32 __user *frame;
931 sigset_t set;
932 unsigned long ax;
933 struct pt_regs tregs;
934
935 frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
936
937 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
938 goto badframe;
939 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
940 goto badframe;
941
942 sigdelsetmask(&set, ~_BLOCKABLE);
943 set_current_blocked(&set);
944
945 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
946 goto badframe;
947
948 tregs = *regs;
949 if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
950 goto badframe;
951
952 return ax;
953
954badframe:
955 signal_fault(regs, frame, "x32 rt_sigreturn");
956 return 0;
957}
958#endif
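Note: x32 signal delivery reuses the 64-bit register layout but a compat siginfo, which is why the frame is filled with setup_sigcontext() yet copy_siginfo_to_user32(). From the field accesses above, the frame can be sketched as below; this is inferred, not the authoritative definition (which is added to asm/sigframe.h):

    /* inferred sketch */
    struct rt_sigframe_x32 {
            u64 pretcode;             /* SA_RESTORER trampoline */
            struct ucontext_x32 uc;   /* uc_flags, uc_link, uc_stack,
                                         uc__pad0, uc_mcontext, uc_sigmask */
            compat_siginfo_t info;
            /* xsave/fpstate area follows, reached via uc_mcontext.fpstate */
    };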
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 051489082d5..f921df8c209 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -98,7 +98,7 @@ out:
98static void find_start_end(unsigned long flags, unsigned long *begin, 98static void find_start_end(unsigned long flags, unsigned long *begin,
99 unsigned long *end) 99 unsigned long *end)
100{ 100{
101 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { 101 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
102 unsigned long new_begin; 102 unsigned long new_begin;
 103 /* This is usually needed to map code in small 103 /* This is usually needed to map code in small
104 model, so it needs to be in the first 31bit. Limit 104 model, so it needs to be in the first 31bit. Limit
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
144 (!vma || addr + len <= vma->vm_start)) 144 (!vma || addr + len <= vma->vm_start))
145 return addr; 145 return addr;
146 } 146 }
147 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) 147 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
148 && len <= mm->cached_hole_size) { 148 && len <= mm->cached_hole_size) {
149 mm->cached_hole_size = 0; 149 mm->cached_hole_size = 0;
150 mm->free_area_cache = begin; 150 mm->free_area_cache = begin;
@@ -205,7 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
205 return addr; 205 return addr;
206 206
 207 /* for MAP_32BIT mappings we force the legacy mmap base */ 207 /* for MAP_32BIT mappings we force the legacy mmap base */
208 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) 208 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
209 goto bottomup; 209 goto bottomup;
210 210
211 /* requesting a specific address */ 211 /* requesting a specific address */
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 7ac7943be02..5c7f8c20da7 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -5,6 +5,14 @@
5#include <linux/cache.h> 5#include <linux/cache.h>
6#include <asm/asm-offsets.h> 6#include <asm/asm-offsets.h>
7 7
8#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
9
10#ifdef CONFIG_X86_X32_ABI
11# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
12#else
13# define __SYSCALL_X32(nr, sym, compat) /* nothing */
14#endif
15
8#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; 16#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
9#include <asm/syscalls_64.h> 17#include <asm/syscalls_64.h>
10#undef __SYSCALL_64 18#undef __SYSCALL_64
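Note: the generated syscalls_64.h is included twice in this file — the pass above turns every entry into an extern declaration, and a second pass emits the table itself. A sketch of that second pass, assumed from the surrounding file:

    #undef __SYSCALL_64
    #define __SYSCALL_64(nr, sym, compat) [nr] = sym,

    typedef void (*sys_call_ptr_t)(void);
    extern void sys_ni_syscall(void);

    const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
            [0 ... __NR_syscall_max] = sys_ni_syscall,
    #include <asm/syscalls_64.h>
    };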
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 482ec3af206..c6d17ad59b8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -119,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
119 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. 119 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
120 * On nmi (interrupt 2), do_trap should not be called. 120 * On nmi (interrupt 2), do_trap should not be called.
121 */ 121 */
122 if (trapnr < 6) 122 if (trapnr < X86_TRAP_UD)
123 goto vm86_trap; 123 goto vm86_trap;
124 goto trap_signal; 124 goto trap_signal;
125 } 125 }
@@ -132,7 +132,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
132trap_signal: 132trap_signal:
133#endif 133#endif
134 /* 134 /*
135 * We want error_code and trap_no set for userspace faults and 135 * We want error_code and trap_nr set for userspace faults and
136 * kernelspace faults which result in die(), but not 136 * kernelspace faults which result in die(), but not
137 * kernelspace faults which are fixed up. die() gives the 137 * kernelspace faults which are fixed up. die() gives the
138 * process no chance to handle the signal and notice the 138 * process no chance to handle the signal and notice the
@@ -141,7 +141,7 @@ trap_signal:
141 * delivered, faults. See also do_general_protection below. 141 * delivered, faults. See also do_general_protection below.
142 */ 142 */
143 tsk->thread.error_code = error_code; 143 tsk->thread.error_code = error_code;
144 tsk->thread.trap_no = trapnr; 144 tsk->thread.trap_nr = trapnr;
145 145
146#ifdef CONFIG_X86_64 146#ifdef CONFIG_X86_64
147 if (show_unhandled_signals && unhandled_signal(tsk, signr) && 147 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
@@ -164,7 +164,7 @@ trap_signal:
164kernel_trap: 164kernel_trap:
165 if (!fixup_exception(regs)) { 165 if (!fixup_exception(regs)) {
166 tsk->thread.error_code = error_code; 166 tsk->thread.error_code = error_code;
167 tsk->thread.trap_no = trapnr; 167 tsk->thread.trap_nr = trapnr;
168 die(str, regs, error_code); 168 die(str, regs, error_code);
169 } 169 }
170 return; 170 return;
@@ -203,27 +203,31 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
203 do_trap(trapnr, signr, str, regs, error_code, &info); \ 203 do_trap(trapnr, signr, str, regs, error_code, &info); \
204} 204}
205 205
206DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) 206DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
207DO_ERROR(4, SIGSEGV, "overflow", overflow) 207 regs->ip)
208DO_ERROR(5, SIGSEGV, "bounds", bounds) 208DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
209DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) 209DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
210DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 210DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
211DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 211 regs->ip)
212DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 212DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
213 coprocessor_segment_overrun)
214DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
215DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
213#ifdef CONFIG_X86_32 216#ifdef CONFIG_X86_32
214DO_ERROR(12, SIGBUS, "stack segment", stack_segment) 217DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
215#endif 218#endif
216DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) 219DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
220 BUS_ADRALN, 0)
217 221
218#ifdef CONFIG_X86_64 222#ifdef CONFIG_X86_64
219/* Runs on IST stack */ 223/* Runs on IST stack */
220dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 224dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
221{ 225{
222 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 226 if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
223 12, SIGBUS) == NOTIFY_STOP) 227 X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
224 return; 228 return;
225 preempt_conditional_sti(regs); 229 preempt_conditional_sti(regs);
226 do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL); 230 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
227 preempt_conditional_cli(regs); 231 preempt_conditional_cli(regs);
228} 232}
229 233
@@ -233,10 +237,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
233 struct task_struct *tsk = current; 237 struct task_struct *tsk = current;
234 238
 235 /* Return not checked because a double fault cannot be ignored */ 239 /* Return not checked because a double fault cannot be ignored */
236 notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV); 240 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
237 241
238 tsk->thread.error_code = error_code; 242 tsk->thread.error_code = error_code;
239 tsk->thread.trap_no = 8; 243 tsk->thread.trap_nr = X86_TRAP_DF;
240 244
241 /* 245 /*
242 * This is always a kernel trap and never fixable (and thus must 246 * This is always a kernel trap and never fixable (and thus must
@@ -264,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
264 goto gp_in_kernel; 268 goto gp_in_kernel;
265 269
266 tsk->thread.error_code = error_code; 270 tsk->thread.error_code = error_code;
267 tsk->thread.trap_no = 13; 271 tsk->thread.trap_nr = X86_TRAP_GP;
268 272
269 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 273 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
270 printk_ratelimit()) { 274 printk_ratelimit()) {
@@ -291,9 +295,9 @@ gp_in_kernel:
291 return; 295 return;
292 296
293 tsk->thread.error_code = error_code; 297 tsk->thread.error_code = error_code;
294 tsk->thread.trap_no = 13; 298 tsk->thread.trap_nr = X86_TRAP_GP;
295 if (notify_die(DIE_GPF, "general protection fault", regs, 299 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
296 error_code, 13, SIGSEGV) == NOTIFY_STOP) 300 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
297 return; 301 return;
298 die("general protection fault", regs, error_code); 302 die("general protection fault", regs, error_code);
299} 303}
@@ -302,13 +306,13 @@ gp_in_kernel:
302dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) 306dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
303{ 307{
304#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 308#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
305 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 309 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
306 == NOTIFY_STOP) 310 SIGTRAP) == NOTIFY_STOP)
307 return; 311 return;
308#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 312#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
309 313
310 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 314 if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
311 == NOTIFY_STOP) 315 SIGTRAP) == NOTIFY_STOP)
312 return; 316 return;
313 317
314 /* 318 /*
@@ -317,7 +321,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
317 */ 321 */
318 debug_stack_usage_inc(); 322 debug_stack_usage_inc();
319 preempt_conditional_sti(regs); 323 preempt_conditional_sti(regs);
320 do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); 324 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
321 preempt_conditional_cli(regs); 325 preempt_conditional_cli(regs);
322 debug_stack_usage_dec(); 326 debug_stack_usage_dec();
323} 327}
@@ -422,8 +426,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
422 preempt_conditional_sti(regs); 426 preempt_conditional_sti(regs);
423 427
424 if (regs->flags & X86_VM_MASK) { 428 if (regs->flags & X86_VM_MASK) {
425 handle_vm86_trap((struct kernel_vm86_regs *) regs, 429 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
426 error_code, 1); 430 X86_TRAP_DB);
427 preempt_conditional_cli(regs); 431 preempt_conditional_cli(regs);
428 debug_stack_usage_dec(); 432 debug_stack_usage_dec();
429 return; 433 return;
@@ -460,7 +464,8 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
460 struct task_struct *task = current; 464 struct task_struct *task = current;
461 siginfo_t info; 465 siginfo_t info;
462 unsigned short err; 466 unsigned short err;
463 char *str = (trapnr == 16) ? "fpu exception" : "simd exception"; 467 char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
468 "simd exception";
464 469
465 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) 470 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
466 return; 471 return;
@@ -470,7 +475,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
470 { 475 {
471 if (!fixup_exception(regs)) { 476 if (!fixup_exception(regs)) {
472 task->thread.error_code = error_code; 477 task->thread.error_code = error_code;
473 task->thread.trap_no = trapnr; 478 task->thread.trap_nr = trapnr;
474 die(str, regs, error_code); 479 die(str, regs, error_code);
475 } 480 }
476 return; 481 return;
@@ -480,12 +485,12 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
480 * Save the info for the exception handler and clear the error. 485 * Save the info for the exception handler and clear the error.
481 */ 486 */
482 save_init_fpu(task); 487 save_init_fpu(task);
483 task->thread.trap_no = trapnr; 488 task->thread.trap_nr = trapnr;
484 task->thread.error_code = error_code; 489 task->thread.error_code = error_code;
485 info.si_signo = SIGFPE; 490 info.si_signo = SIGFPE;
486 info.si_errno = 0; 491 info.si_errno = 0;
487 info.si_addr = (void __user *)regs->ip; 492 info.si_addr = (void __user *)regs->ip;
488 if (trapnr == 16) { 493 if (trapnr == X86_TRAP_MF) {
489 unsigned short cwd, swd; 494 unsigned short cwd, swd;
490 /* 495 /*
491 * (~cwd & swd) will mask out exceptions that are not set to unmasked 496 * (~cwd & swd) will mask out exceptions that are not set to unmasked
@@ -529,10 +534,11 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
529 info.si_code = FPE_FLTRES; 534 info.si_code = FPE_FLTRES;
530 } else { 535 } else {
531 /* 536 /*
532 * If we're using IRQ 13, or supposedly even some trap 16 537 * If we're using IRQ 13, or supposedly even some trap
533 * implementations, it's possible we get a spurious trap... 538 * X86_TRAP_MF implementations, it's possible
539 * we get a spurious trap, which is not an error.
534 */ 540 */
535 return; /* Spurious trap, no error */ 541 return;
536 } 542 }
537 force_sig_info(SIGFPE, &info, task); 543 force_sig_info(SIGFPE, &info, task);
538} 544}
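
The (~cwd & swd) test in math_error() above is worth a worked example: the x87 control word holds the exception *mask* bits and the status word the exception *flag* bits, both in bits 0-5, so ~cwd & swd yields exactly the exceptions that are both raised and unmasked. A minimal user-space sketch of that arithmetic (values only; no real FPU state is touched):

#include <stdio.h>

int main(void)
{
	unsigned short cwd = 0x037f & ~0x0004; /* default masks, ZE unmasked */
	unsigned short swd = 0x0004;           /* ZE (divide-by-zero) raised */
	unsigned short err = ~cwd & swd & 0x3f;

	printf("pending unmasked exceptions: %#x\n", err); /* prints 0x4 */
	return 0;
}
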
@@ -543,13 +549,13 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
543 ignore_fpu_irq = 1; 549 ignore_fpu_irq = 1;
544#endif 550#endif
545 551
546 math_error(regs, error_code, 16); 552 math_error(regs, error_code, X86_TRAP_MF);
547} 553}
548 554
549dotraplinkage void 555dotraplinkage void
550do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 556do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
551{ 557{
552 math_error(regs, error_code, 19); 558 math_error(regs, error_code, X86_TRAP_XF);
553} 559}
554 560
555dotraplinkage void 561dotraplinkage void
@@ -571,41 +577,18 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
571} 577}
572 578
573/* 579/*
574 * __math_state_restore assumes that cr0.TS is already clear and the
575 * fpu state is all ready for use. Used during context switch.
576 */
577void __math_state_restore(void)
578{
579 struct thread_info *thread = current_thread_info();
580 struct task_struct *tsk = thread->task;
581
582 /*
583 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
584 */
585 if (unlikely(restore_fpu_checking(tsk))) {
586 stts();
587 force_sig(SIGSEGV, tsk);
588 return;
589 }
590
591 thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
592 tsk->fpu_counter++;
593}
594
595/*
596 * 'math_state_restore()' saves the current math information in the 580 * 'math_state_restore()' saves the current math information in the
597 * old math state array, and gets the new ones from the current task 581 * old math state array, and gets the new ones from the current task
598 * 582 *
599 * Careful.. There are problems with IBM-designed IRQ13 behaviour. 583 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
600 * Don't touch unless you *really* know how it works. 584 * Don't touch unless you *really* know how it works.
601 * 585 *
602 * Must be called with kernel preemption disabled (in this case, 586 * Must be called with kernel preemption disabled (e.g. with
603 * local interrupts are disabled at the call-site in entry.S). 587 * local interrupts disabled, as in the case of do_device_not_available).
604 */ 588 */
605asmlinkage void math_state_restore(void) 589void math_state_restore(void)
606{ 590{
607 struct thread_info *thread = current_thread_info(); 591 struct task_struct *tsk = current;
608 struct task_struct *tsk = thread->task;
609 592
610 if (!tsk_used_math(tsk)) { 593 if (!tsk_used_math(tsk)) {
611 local_irq_enable(); 594 local_irq_enable();
@@ -622,9 +605,17 @@ asmlinkage void math_state_restore(void)
622 local_irq_disable(); 605 local_irq_disable();
623 } 606 }
624 607
625 clts(); /* Allow maths ops (or we recurse) */ 608 __thread_fpu_begin(tsk);
609 /*
610 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
611 */
612 if (unlikely(restore_fpu_checking(tsk))) {
613 __thread_fpu_end(tsk);
614 force_sig(SIGSEGV, tsk);
615 return;
616 }
626 617
627 __math_state_restore(); 618 tsk->fpu_counter++;
628} 619}
629EXPORT_SYMBOL_GPL(math_state_restore); 620EXPORT_SYMBOL_GPL(math_state_restore);
630 621
@@ -658,20 +649,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
658 info.si_errno = 0; 649 info.si_errno = 0;
659 info.si_code = ILL_BADSTK; 650 info.si_code = ILL_BADSTK;
660 info.si_addr = NULL; 651 info.si_addr = NULL;
661 if (notify_die(DIE_TRAP, "iret exception", 652 if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
662 regs, error_code, 32, SIGILL) == NOTIFY_STOP) 653 X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
663 return; 654 return;
664 do_trap(32, SIGILL, "iret exception", regs, error_code, &info); 655 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
656 &info);
665} 657}
666#endif 658#endif
667 659
668/* Set of traps needed for early debugging. */ 660/* Set of traps needed for early debugging. */
669void __init early_trap_init(void) 661void __init early_trap_init(void)
670{ 662{
671 set_intr_gate_ist(1, &debug, DEBUG_STACK); 663 set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
672 /* int3 can be called from all */ 664 /* int3 can be called from all */
673 set_system_intr_gate_ist(3, &int3, DEBUG_STACK); 665 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
674 set_intr_gate(14, &page_fault); 666 set_intr_gate(X86_TRAP_PF, &page_fault);
675 load_idt(&idt_descr); 667 load_idt(&idt_descr);
676} 668}
677 669
@@ -687,30 +679,30 @@ void __init trap_init(void)
687 early_iounmap(p, 4); 679 early_iounmap(p, 4);
688#endif 680#endif
689 681
690 set_intr_gate(0, &divide_error); 682 set_intr_gate(X86_TRAP_DE, &divide_error);
691 set_intr_gate_ist(2, &nmi, NMI_STACK); 683 set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
692 /* int4 can be called from all */ 684 /* int4 can be called from all */
693 set_system_intr_gate(4, &overflow); 685 set_system_intr_gate(X86_TRAP_OF, &overflow);
694 set_intr_gate(5, &bounds); 686 set_intr_gate(X86_TRAP_BR, &bounds);
695 set_intr_gate(6, &invalid_op); 687 set_intr_gate(X86_TRAP_UD, &invalid_op);
696 set_intr_gate(7, &device_not_available); 688 set_intr_gate(X86_TRAP_NM, &device_not_available);
697#ifdef CONFIG_X86_32 689#ifdef CONFIG_X86_32
698 set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); 690 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
699#else 691#else
700 set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); 692 set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
701#endif 693#endif
702 set_intr_gate(9, &coprocessor_segment_overrun); 694 set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
703 set_intr_gate(10, &invalid_TSS); 695 set_intr_gate(X86_TRAP_TS, &invalid_TSS);
704 set_intr_gate(11, &segment_not_present); 696 set_intr_gate(X86_TRAP_NP, &segment_not_present);
705 set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); 697 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
706 set_intr_gate(13, &general_protection); 698 set_intr_gate(X86_TRAP_GP, &general_protection);
707 set_intr_gate(15, &spurious_interrupt_bug); 699 set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
708 set_intr_gate(16, &coprocessor_error); 700 set_intr_gate(X86_TRAP_MF, &coprocessor_error);
709 set_intr_gate(17, &alignment_check); 701 set_intr_gate(X86_TRAP_AC, &alignment_check);
710#ifdef CONFIG_X86_MCE 702#ifdef CONFIG_X86_MCE
711 set_intr_gate_ist(18, &machine_check, MCE_STACK); 703 set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
712#endif 704#endif
713 set_intr_gate(19, &simd_coprocessor_error); 705 set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
714 706
715 /* Reserve all the builtin and the syscall vector: */ 707 /* Reserve all the builtin and the syscall vector: */
716 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 708 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
@@ -735,7 +727,7 @@ void __init trap_init(void)
735 727
736#ifdef CONFIG_X86_64 728#ifdef CONFIG_X86_64
737 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); 729 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
738 set_nmi_gate(1, &debug); 730 set_nmi_gate(X86_TRAP_DB, &debug);
739 set_nmi_gate(3, &int3); 731 set_nmi_gate(X86_TRAP_BP, &int3);
740#endif 732#endif
741} 733}
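
For reference, the X86_TRAP_* names the patch substitutes for the old magic numbers map to the standard x86 exception vectors. A trimmed sketch of the asm/traps.h enum (not the full header; X86_TRAP_IRET is the kernel's own pseudo-vector, as the do_iret_error() hunk shows):

enum {
	X86_TRAP_DE = 0,	/* Divide-by-zero */
	X86_TRAP_DB = 1,	/* Debug */
	X86_TRAP_NMI = 2,	/* Non-maskable interrupt */
	X86_TRAP_BP = 3,	/* Breakpoint (int3) */
	X86_TRAP_OF = 4,	/* Overflow */
	X86_TRAP_BR = 5,	/* Bound range exceeded */
	X86_TRAP_UD = 6,	/* Invalid opcode */
	X86_TRAP_NM = 7,	/* Device not available */
	X86_TRAP_DF = 8,	/* Double fault */
	X86_TRAP_OLD_MF = 9,	/* Coprocessor segment overrun */
	X86_TRAP_TS = 10,	/* Invalid TSS */
	X86_TRAP_NP = 11,	/* Segment not present */
	X86_TRAP_SS = 12,	/* Stack-segment fault */
	X86_TRAP_GP = 13,	/* General protection fault */
	X86_TRAP_PF = 14,	/* Page fault */
	X86_TRAP_SPURIOUS = 15,	/* Spurious interrupt */
	X86_TRAP_MF = 16,	/* x87 FPU error */
	X86_TRAP_AC = 17,	/* Alignment check */
	X86_TRAP_MC = 18,	/* Machine check */
	X86_TRAP_XF = 19,	/* SIMD exception */
	X86_TRAP_IRET = 32,	/* IRET exception */
};
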
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index b466cab5ba1..a1315ab2d6b 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -567,7 +567,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
567 } 567 }
568 if (trapno != 1) 568 if (trapno != 1)
569 return 1; /* we let this be handled by the calling routine */ 569 return 1; /* we let this be handled by the calling routine */
570 current->thread.trap_no = trapno; 570 current->thread.trap_nr = trapno;
571 current->thread.error_code = error_code; 571 current->thread.error_code = error_code;
572 force_sig(SIGTRAP, current); 572 force_sig(SIGTRAP, current);
573 return 0; 573 return 0;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index b07ba939356..327509b95e0 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -153,7 +153,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
153 153
154 thread->error_code = 6; /* user fault, no page, write */ 154 thread->error_code = 6; /* user fault, no page, write */
155 thread->cr2 = ptr; 155 thread->cr2 = ptr;
156 thread->trap_no = 14; 156 thread->trap_nr = X86_TRAP_PF;
157 157
158 memset(&info, 0, sizeof(info)); 158 memset(&info, 0, sizeof(info));
159 info.si_signo = SIGSEGV; 159 info.si_signo = SIGSEGV;
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a3911343976..71109111411 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
47 if (!fx) 47 if (!fx)
48 return; 48 return;
49 49
50 BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU); 50 BUG_ON(__thread_has_fpu(tsk));
51 51
52 xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv; 52 xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
53 53
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
168 if (!used_math()) 168 if (!used_math())
169 return 0; 169 return 0;
170 170
171 if (task_thread_info(tsk)->status & TS_USEDFPU) { 171 if (user_has_fpu()) {
172 if (use_xsave()) 172 if (use_xsave())
173 err = xsave_user(buf); 173 err = xsave_user(buf);
174 else 174 else
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
176 176
177 if (err) 177 if (err)
178 return err; 178 return err;
179 task_thread_info(tsk)->status &= ~TS_USEDFPU; 179 user_fpu_end();
180 stts();
181 } else { 180 } else {
182 sanitize_i387_state(tsk); 181 sanitize_i387_state(tsk);
183 if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, 182 if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
292 return err; 291 return err;
293 } 292 }
294 293
295 if (!(task_thread_info(current)->status & TS_USEDFPU)) { 294 user_fpu_begin();
296 clts();
297 task_thread_info(current)->status |= TS_USEDFPU;
298 }
299 if (use_xsave()) 295 if (use_xsave())
300 err = restore_user_xstate(buf); 296 err = restore_user_xstate(buf);
301 else 297 else
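
The open-coded TS_USEDFPU/clts()/stts() sequences deleted above are what the new user_fpu_begin()/user_fpu_end() helpers wrap. A self-contained model of that ownership protocol, inferred from the removed lines (the flag value and the _model names are illustrative; CR0.TS is privileged, so it is only stubbed here):

#define TS_USEDFPU 0x0001		/* illustrative flag value */

struct thread_info_model { unsigned int status; };

static void clts_model(void) { /* clear CR0.TS: FPU ops stop faulting */ }
static void stts_model(void) { /* set CR0.TS: next FPU op traps #NM */ }

/* restore path: take ownership of the FPU registers */
static void user_fpu_begin_model(struct thread_info_model *ti)
{
	clts_model();
	ti->status |= TS_USEDFPU;
}

/* save path: drop ownership so a later use re-faults and reloads */
static void user_fpu_end_model(struct thread_info_model *ti)
{
	ti->status &= ~TS_USEDFPU;
	stts_model();
}
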
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 05a562b8502..0982507b962 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1891,6 +1891,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1891 ss->p = 1; 1891 ss->p = 1;
1892} 1892}
1893 1893
1894static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
1895{
1896 struct x86_emulate_ops *ops = ctxt->ops;
1897 u32 eax, ebx, ecx, edx;
1898
1899 /*
 1900	 * syscall should always be enabled in long mode - so the check only
 1901	 * becomes vendor specific (via cpuid) if other modes are active...
1902 */
1903 if (ctxt->mode == X86EMUL_MODE_PROT64)
1904 return true;
1905
1906 eax = 0x00000000;
1907 ecx = 0x00000000;
1908 if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
1909 /*
1910 * Intel ("GenuineIntel")
 1911	 * remark: Intel CPUs only support "syscall" in 64-bit
 1912	 * long mode. A 64-bit guest running a 32-bit compat
 1913	 * app will #UD! While this behaviour can be fixed (by
 1914	 * emulating) to match the AMD response, AMD CPUs
 1915	 * cannot be made to behave like Intel CPUs.
1916 */
1917 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
1918 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
1919 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
1920 return false;
1921
1922 /* AMD ("AuthenticAMD") */
1923 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
1924 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
1925 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
1926 return true;
1927
1928 /* AMD ("AMDisbetter!") */
1929 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
1930 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
1931 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
1932 return true;
1933 }
1934
1935 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
1936 return false;
1937}
1938
1894static int em_syscall(struct x86_emulate_ctxt *ctxt) 1939static int em_syscall(struct x86_emulate_ctxt *ctxt)
1895{ 1940{
1896 struct x86_emulate_ops *ops = ctxt->ops; 1941 struct x86_emulate_ops *ops = ctxt->ops;
@@ -1904,9 +1949,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
1904 ctxt->mode == X86EMUL_MODE_VM86) 1949 ctxt->mode == X86EMUL_MODE_VM86)
1905 return emulate_ud(ctxt); 1950 return emulate_ud(ctxt);
1906 1951
1952 if (!(em_syscall_is_enabled(ctxt)))
1953 return emulate_ud(ctxt);
1954
1907 ops->get_msr(ctxt, MSR_EFER, &efer); 1955 ops->get_msr(ctxt, MSR_EFER, &efer);
1908 setup_syscalls_segments(ctxt, &cs, &ss); 1956 setup_syscalls_segments(ctxt, &cs, &ss);
1909 1957
1958 if (!(efer & EFER_SCE))
1959 return emulate_ud(ctxt);
1960
1910 ops->get_msr(ctxt, MSR_STAR, &msr_data); 1961 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1911 msr_data >>= 32; 1962 msr_data >>= 32;
1912 cs_sel = (u16)(msr_data & 0xfffc); 1963 cs_sel = (u16)(msr_data & 0xfffc);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5fa553babe5..e385214711c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -29,6 +29,7 @@
29#include <linux/ftrace_event.h> 29#include <linux/ftrace_event.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31 31
32#include <asm/perf_event.h>
32#include <asm/tlbflush.h> 33#include <asm/tlbflush.h>
33#include <asm/desc.h> 34#include <asm/desc.h>
34#include <asm/kvm_para.h> 35#include <asm/kvm_para.h>
@@ -575,6 +576,8 @@ static void svm_hardware_disable(void *garbage)
575 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); 576 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
576 577
577 cpu_svm_disable(); 578 cpu_svm_disable();
579
580 amd_pmu_disable_virt();
578} 581}
579 582
580static int svm_hardware_enable(void *garbage) 583static int svm_hardware_enable(void *garbage)
@@ -622,6 +625,8 @@ static int svm_hardware_enable(void *garbage)
622 625
623 svm_init_erratum_383(); 626 svm_init_erratum_383();
624 627
628 amd_pmu_enable_virt();
629
625 return 0; 630 return 0;
626} 631}
627 632
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d29216c462b..3b4c8d8ad90 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1457#ifdef CONFIG_X86_64 1457#ifdef CONFIG_X86_64
1458 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); 1458 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1459#endif 1459#endif
1460 if (current_thread_info()->status & TS_USEDFPU) 1460 if (__thread_has_fpu(current))
1461 clts(); 1461 clts();
1462 load_gdt(&__get_cpu_var(host_gdt)); 1462 load_gdt(&__get_cpu_var(host_gdt));
1463} 1463}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 14d6cadc4ba..9cbfc069811 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1495,6 +1495,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
1495 1495
1496int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) 1496int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1497{ 1497{
1498 bool pr = false;
1499
1498 switch (msr) { 1500 switch (msr) {
1499 case MSR_EFER: 1501 case MSR_EFER:
1500 return set_efer(vcpu, data); 1502 return set_efer(vcpu, data);
@@ -1635,6 +1637,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1635 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " 1637 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1636 "0x%x data 0x%llx\n", msr, data); 1638 "0x%x data 0x%llx\n", msr, data);
1637 break; 1639 break;
1640 case MSR_P6_PERFCTR0:
1641 case MSR_P6_PERFCTR1:
1642 pr = true;
1643 case MSR_P6_EVNTSEL0:
1644 case MSR_P6_EVNTSEL1:
1645 if (kvm_pmu_msr(vcpu, msr))
1646 return kvm_pmu_set_msr(vcpu, msr, data);
1647
1648 if (pr || data != 0)
1649 pr_unimpl(vcpu, "disabled perfctr wrmsr: "
1650 "0x%x data 0x%llx\n", msr, data);
1651 break;
1638 case MSR_K7_CLK_CTL: 1652 case MSR_K7_CLK_CTL:
1639 /* 1653 /*
1640 * Ignore all writes to this no longer documented MSR. 1654 * Ignore all writes to this no longer documented MSR.
@@ -1835,6 +1849,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1835 case MSR_FAM10H_MMIO_CONF_BASE: 1849 case MSR_FAM10H_MMIO_CONF_BASE:
1836 data = 0; 1850 data = 0;
1837 break; 1851 break;
1852 case MSR_P6_PERFCTR0:
1853 case MSR_P6_PERFCTR1:
1854 case MSR_P6_EVNTSEL0:
1855 case MSR_P6_EVNTSEL1:
1856 if (kvm_pmu_msr(vcpu, msr))
1857 return kvm_pmu_get_msr(vcpu, msr, pdata);
1858 data = 0;
1859 break;
1838 case MSR_IA32_UCODE_REV: 1860 case MSR_IA32_UCODE_REV:
1839 data = 0x100000000ULL; 1861 data = 0x100000000ULL;
1840 break; 1862 break;
@@ -4180,6 +4202,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
4180 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); 4202 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
4181} 4203}
4182 4204
4205static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
4206 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
4207{
4208 struct kvm_cpuid_entry2 *cpuid = NULL;
4209
4210 if (eax && ecx)
4211 cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
4212 *eax, *ecx);
4213
4214 if (cpuid) {
4215 *eax = cpuid->eax;
4216 *ecx = cpuid->ecx;
4217 if (ebx)
4218 *ebx = cpuid->ebx;
4219 if (edx)
4220 *edx = cpuid->edx;
4221 return true;
4222 }
4223
4224 return false;
4225}
4226
4183static struct x86_emulate_ops emulate_ops = { 4227static struct x86_emulate_ops emulate_ops = {
4184 .read_std = kvm_read_guest_virt_system, 4228 .read_std = kvm_read_guest_virt_system,
4185 .write_std = kvm_write_guest_virt_system, 4229 .write_std = kvm_write_guest_virt_system,
@@ -4211,6 +4255,7 @@ static struct x86_emulate_ops emulate_ops = {
4211 .get_fpu = emulator_get_fpu, 4255 .get_fpu = emulator_get_fpu,
4212 .put_fpu = emulator_put_fpu, 4256 .put_fpu = emulator_put_fpu,
4213 .intercept = emulator_intercept, 4257 .intercept = emulator_intercept,
4258 .get_cpuid = emulator_get_cpuid,
4214}; 4259};
4215 4260
4216static void cache_all_regs(struct kvm_vcpu *vcpu) 4261static void cache_all_regs(struct kvm_vcpu *vcpu)
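
The new MSR_P6_PERFCTR* cases rely on "pr = true;" deliberately falling through into the shared EVNTSEL handling, so only zero writes to the event-select MSRs stay silent. A compilable miniature of that idiom (MSR numbers per the architectural P6 definitions):

#include <stdbool.h>
#include <stdio.h>

static void wrmsr_demo(int msr, unsigned long long data)
{
	bool pr = false;

	switch (msr) {
	case 0xc1: case 0xc2:		/* MSR_P6_PERFCTR0/1 */
		pr = true;
		/* fall through */
	case 0x186: case 0x187:		/* MSR_P6_EVNTSEL0/1 */
		if (pr || data != 0)
			printf("disabled perfctr wrmsr: %#x data %#llx\n",
			       msr, data);
		break;
	}
}

int main(void)
{
	wrmsr_demo(0xc1, 0);	/* perfctr: warns even for a zero write */
	wrmsr_demo(0x186, 0);	/* evntsel: a zero write stays silent */
	return 0;
}
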
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 7718541541d..9b868124128 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -28,6 +28,7 @@
28#include <linux/regset.h> 28#include <linux/regset.h>
29 29
30#include <asm/uaccess.h> 30#include <asm/uaccess.h>
31#include <asm/traps.h>
31#include <asm/desc.h> 32#include <asm/desc.h>
32#include <asm/user.h> 33#include <asm/user.h>
33#include <asm/i387.h> 34#include <asm/i387.h>
@@ -269,7 +270,7 @@ void math_emulate(struct math_emu_info *info)
269 FPU_EIP = FPU_ORIG_EIP; /* Point to current FPU instruction. */ 270 FPU_EIP = FPU_ORIG_EIP; /* Point to current FPU instruction. */
270 271
271 RE_ENTRANT_CHECK_OFF; 272 RE_ENTRANT_CHECK_OFF;
272 current->thread.trap_no = 16; 273 current->thread.trap_nr = X86_TRAP_MF;
273 current->thread.error_code = 0; 274 current->thread.error_code = 0;
274 send_sig(SIGFPE, current, 1); 275 send_sig(SIGFPE, current, 1);
275 return; 276 return;
@@ -662,7 +663,7 @@ static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
662void math_abort(struct math_emu_info *info, unsigned int signal) 663void math_abort(struct math_emu_info *info, unsigned int signal)
663{ 664{
664 FPU_EIP = FPU_ORIG_EIP; 665 FPU_EIP = FPU_ORIG_EIP;
665 current->thread.trap_no = 16; 666 current->thread.trap_nr = X86_TRAP_MF;
666 current->thread.error_code = 0; 667 current->thread.error_code = 0;
667 send_sig(signal, current, 1); 668 send_sig(signal, current, 1);
668 RE_ENTRANT_CHECK_OFF; 669 RE_ENTRANT_CHECK_OFF;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f0b4caf85c1..3ecfd1aaf21 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -615,7 +615,7 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
615 dump_pagetable(address); 615 dump_pagetable(address);
616 616
617 tsk->thread.cr2 = address; 617 tsk->thread.cr2 = address;
618 tsk->thread.trap_no = 14; 618 tsk->thread.trap_nr = X86_TRAP_PF;
619 tsk->thread.error_code = error_code; 619 tsk->thread.error_code = error_code;
620 620
621 if (__die("Bad pagetable", regs, error_code)) 621 if (__die("Bad pagetable", regs, error_code))
@@ -636,7 +636,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
636 /* Are we prepared to handle this kernel fault? */ 636 /* Are we prepared to handle this kernel fault? */
637 if (fixup_exception(regs)) { 637 if (fixup_exception(regs)) {
638 if (current_thread_info()->sig_on_uaccess_error && signal) { 638 if (current_thread_info()->sig_on_uaccess_error && signal) {
639 tsk->thread.trap_no = 14; 639 tsk->thread.trap_nr = X86_TRAP_PF;
640 tsk->thread.error_code = error_code | PF_USER; 640 tsk->thread.error_code = error_code | PF_USER;
641 tsk->thread.cr2 = address; 641 tsk->thread.cr2 = address;
642 642
@@ -676,7 +676,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
676 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 676 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
677 677
678 tsk->thread.cr2 = address; 678 tsk->thread.cr2 = address;
679 tsk->thread.trap_no = 14; 679 tsk->thread.trap_nr = X86_TRAP_PF;
680 tsk->thread.error_code = error_code; 680 tsk->thread.error_code = error_code;
681 681
682 sig = SIGKILL; 682 sig = SIGKILL;
@@ -754,7 +754,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
754 /* Kernel addresses are always protection faults: */ 754 /* Kernel addresses are always protection faults: */
755 tsk->thread.cr2 = address; 755 tsk->thread.cr2 = address;
756 tsk->thread.error_code = error_code | (address >= TASK_SIZE); 756 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
757 tsk->thread.trap_no = 14; 757 tsk->thread.trap_nr = X86_TRAP_PF;
758 758
759 force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); 759 force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
760 760
@@ -838,7 +838,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
838 838
839 tsk->thread.cr2 = address; 839 tsk->thread.cr2 = address;
840 tsk->thread.error_code = error_code; 840 tsk->thread.error_code = error_code;
841 tsk->thread.trap_no = 14; 841 tsk->thread.trap_nr = X86_TRAP_PF;
842 842
843#ifdef CONFIG_MEMORY_FAILURE 843#ifdef CONFIG_MEMORY_FAILURE
844 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { 844 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index bff89dfe361..d6aa6e8315d 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -67,7 +67,7 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
67{ 67{
68 struct stack_frame_ia32 *head; 68 struct stack_frame_ia32 *head;
69 69
70 /* User process is 32-bit */ 70 /* User process is IA32 */
71 if (!current || !test_thread_flag(TIF_IA32)) 71 if (!current || !test_thread_flag(TIF_IA32))
72 return 0; 72 return 0;
73 73
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 492ade8c978..d99346ea8fd 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -374,7 +374,7 @@ int __init pci_xen_init(void)
374 374
375int __init pci_xen_hvm_init(void) 375int __init pci_xen_hvm_init(void)
376{ 376{
377 if (!xen_feature(XENFEAT_hvm_pirqs)) 377 if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
378 return 0; 378 return 0;
379 379
380#ifdef CONFIG_ACPI 380#ifdef CONFIG_ACPI
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile
index 564b2476fed..3236aebc828 100644
--- a/arch/x86/syscalls/Makefile
+++ b/arch/x86/syscalls/Makefile
@@ -10,8 +10,10 @@ syshdr := $(srctree)/$(src)/syscallhdr.sh
10systbl := $(srctree)/$(src)/syscalltbl.sh 10systbl := $(srctree)/$(src)/syscalltbl.sh
11 11
12quiet_cmd_syshdr = SYSHDR $@ 12quiet_cmd_syshdr = SYSHDR $@
13 cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' $< $@ \ 13 cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
14 $(syshdr_abi_$(basetarget)) $(syshdr_pfx_$(basetarget)) 14 '$(syshdr_abi_$(basetarget))' \
15 '$(syshdr_pfx_$(basetarget))' \
16 '$(syshdr_offset_$(basetarget))'
15quiet_cmd_systbl = SYSTBL $@ 17quiet_cmd_systbl = SYSTBL $@
16 cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@ 18 cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
17 19
@@ -24,18 +26,28 @@ syshdr_pfx_unistd_32_ia32 := ia32_
24$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) 26$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr)
25 $(call if_changed,syshdr) 27 $(call if_changed,syshdr)
26 28
27syshdr_abi_unistd_64 := 64 29syshdr_abi_unistd_x32 := common,x32
30syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
31$(out)/unistd_x32.h: $(syscall64) $(syshdr)
32 $(call if_changed,syshdr)
33
34syshdr_abi_unistd_64 := common,64
28$(out)/unistd_64.h: $(syscall64) $(syshdr) 35$(out)/unistd_64.h: $(syscall64) $(syshdr)
29 $(call if_changed,syshdr) 36 $(call if_changed,syshdr)
30 37
38syshdr_abi_unistd_64_x32 := x32
39syshdr_pfx_unistd_64_x32 := x32_
40$(out)/unistd_64_x32.h: $(syscall64) $(syshdr)
41 $(call if_changed,syshdr)
42
31$(out)/syscalls_32.h: $(syscall32) $(systbl) 43$(out)/syscalls_32.h: $(syscall32) $(systbl)
32 $(call if_changed,systbl) 44 $(call if_changed,systbl)
33$(out)/syscalls_64.h: $(syscall64) $(systbl) 45$(out)/syscalls_64.h: $(syscall64) $(systbl)
34 $(call if_changed,systbl) 46 $(call if_changed,systbl)
35 47
36syshdr-y += unistd_32.h unistd_64.h 48syshdr-y += unistd_32.h unistd_64.h unistd_x32.h
37syshdr-y += syscalls_32.h 49syshdr-y += syscalls_32.h
38syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h 50syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h
39syshdr-$(CONFIG_X86_64) += syscalls_64.h 51syshdr-$(CONFIG_X86_64) += syscalls_64.h
40 52
41targets += $(syshdr-y) 53targets += $(syshdr-y)
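
For orientation, the syshdr rules above should generate headers roughly like the following hand-written sketch (not verbatim output): the new offset argument is what biases every unistd_x32.h number by __X32_SYSCALL_BIT, and the prefix argument is what yields the __NR_x32_* spelling.

/* unistd_64.h  (abi: common,64) */
#define __NR_read 0
#define __NR_rt_sigaction 13

/* unistd_x32.h  (abi: common,x32; offset: __X32_SYSCALL_BIT) */
#define __NR_read (__X32_SYSCALL_BIT + 0)
#define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512)

/* unistd_64_x32.h  (abi: x32; prefix: x32_) */
#define __NR_x32_rt_sigaction 512
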
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index ce98e287c06..031cef84fe4 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -181,7 +181,7 @@
181172 i386 prctl sys_prctl 181172 i386 prctl sys_prctl
182173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn 182173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn
183174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction 183174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction
184175 i386 rt_sigprocmask sys_rt_sigprocmask sys32_rt_sigprocmask 184175 i386 rt_sigprocmask sys_rt_sigprocmask
185176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending 185176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending
186177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait 186177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
187178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo 187178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index b440a8f7eef..dd29a9ea27c 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -4,317 +4,350 @@
4# The format is: 4# The format is:
5# <number> <abi> <name> <entry point> 5# <number> <abi> <name> <entry point>
6# 6#
7# The abi is always "64" for this file (for now.) 7# The abi is "common", "64" or "x32" for this file.
8# 8#
90 64 read sys_read 90 common read sys_read
101 64 write sys_write 101 common write sys_write
112 64 open sys_open 112 common open sys_open
123 64 close sys_close 123 common close sys_close
134 64 stat sys_newstat 134 common stat sys_newstat
145 64 fstat sys_newfstat 145 common fstat sys_newfstat
156 64 lstat sys_newlstat 156 common lstat sys_newlstat
167 64 poll sys_poll 167 common poll sys_poll
178 64 lseek sys_lseek 178 common lseek sys_lseek
189 64 mmap sys_mmap 189 common mmap sys_mmap
1910 64 mprotect sys_mprotect 1910 common mprotect sys_mprotect
2011 64 munmap sys_munmap 2011 common munmap sys_munmap
2112 64 brk sys_brk 2112 common brk sys_brk
2213 64 rt_sigaction sys_rt_sigaction 2213 64 rt_sigaction sys_rt_sigaction
2314 64 rt_sigprocmask sys_rt_sigprocmask 2314 common rt_sigprocmask sys_rt_sigprocmask
2415 64 rt_sigreturn stub_rt_sigreturn 2415 64 rt_sigreturn stub_rt_sigreturn
2516 64 ioctl sys_ioctl 2516 64 ioctl sys_ioctl
2617 64 pread64 sys_pread64 2617 common pread64 sys_pread64
2718 64 pwrite64 sys_pwrite64 2718 common pwrite64 sys_pwrite64
2819 64 readv sys_readv 2819 64 readv sys_readv
2920 64 writev sys_writev 2920 64 writev sys_writev
3021 64 access sys_access 3021 common access sys_access
3122 64 pipe sys_pipe 3122 common pipe sys_pipe
3223 64 select sys_select 3223 common select sys_select
3324 64 sched_yield sys_sched_yield 3324 common sched_yield sys_sched_yield
3425 64 mremap sys_mremap 3425 common mremap sys_mremap
3526 64 msync sys_msync 3526 common msync sys_msync
3627 64 mincore sys_mincore 3627 common mincore sys_mincore
3728 64 madvise sys_madvise 3728 common madvise sys_madvise
3829 64 shmget sys_shmget 3829 common shmget sys_shmget
3930 64 shmat sys_shmat 3930 common shmat sys_shmat
4031 64 shmctl sys_shmctl 4031 common shmctl sys_shmctl
4132 64 dup sys_dup 4132 common dup sys_dup
4233 64 dup2 sys_dup2 4233 common dup2 sys_dup2
4334 64 pause sys_pause 4334 common pause sys_pause
4435 64 nanosleep sys_nanosleep 4435 common nanosleep sys_nanosleep
4536 64 getitimer sys_getitimer 4536 common getitimer sys_getitimer
4637 64 alarm sys_alarm 4637 common alarm sys_alarm
4738 64 setitimer sys_setitimer 4738 common setitimer sys_setitimer
4839 64 getpid sys_getpid 4839 common getpid sys_getpid
4940 64 sendfile sys_sendfile64 4940 common sendfile sys_sendfile64
5041 64 socket sys_socket 5041 common socket sys_socket
5142 64 connect sys_connect 5142 common connect sys_connect
5243 64 accept sys_accept 5243 common accept sys_accept
5344 64 sendto sys_sendto 5344 common sendto sys_sendto
5445 64 recvfrom sys_recvfrom 5445 64 recvfrom sys_recvfrom
5546 64 sendmsg sys_sendmsg 5546 64 sendmsg sys_sendmsg
5647 64 recvmsg sys_recvmsg 5647 64 recvmsg sys_recvmsg
5748 64 shutdown sys_shutdown 5748 common shutdown sys_shutdown
5849 64 bind sys_bind 5849 common bind sys_bind
5950 64 listen sys_listen 5950 common listen sys_listen
6051 64 getsockname sys_getsockname 6051 common getsockname sys_getsockname
6152 64 getpeername sys_getpeername 6152 common getpeername sys_getpeername
6253 64 socketpair sys_socketpair 6253 common socketpair sys_socketpair
6354 64 setsockopt sys_setsockopt 6354 common setsockopt sys_setsockopt
6455 64 getsockopt sys_getsockopt 6455 common getsockopt sys_getsockopt
6556 64 clone stub_clone 6556 common clone stub_clone
6657 64 fork stub_fork 6657 common fork stub_fork
6758 64 vfork stub_vfork 6758 common vfork stub_vfork
6859 64 execve stub_execve 6859 64 execve stub_execve
6960 64 exit sys_exit 6960 common exit sys_exit
7061 64 wait4 sys_wait4 7061 common wait4 sys_wait4
7162 64 kill sys_kill 7162 common kill sys_kill
7263 64 uname sys_newuname 7263 common uname sys_newuname
7364 64 semget sys_semget 7364 common semget sys_semget
7465 64 semop sys_semop 7465 common semop sys_semop
7566 64 semctl sys_semctl 7566 common semctl sys_semctl
7667 64 shmdt sys_shmdt 7667 common shmdt sys_shmdt
7768 64 msgget sys_msgget 7768 common msgget sys_msgget
7869 64 msgsnd sys_msgsnd 7869 common msgsnd sys_msgsnd
7970 64 msgrcv sys_msgrcv 7970 common msgrcv sys_msgrcv
8071 64 msgctl sys_msgctl 8071 common msgctl sys_msgctl
8172 64 fcntl sys_fcntl 8172 common fcntl sys_fcntl
8273 64 flock sys_flock 8273 common flock sys_flock
8374 64 fsync sys_fsync 8374 common fsync sys_fsync
8475 64 fdatasync sys_fdatasync 8475 common fdatasync sys_fdatasync
8576 64 truncate sys_truncate 8576 common truncate sys_truncate
8677 64 ftruncate sys_ftruncate 8677 common ftruncate sys_ftruncate
8778 64 getdents sys_getdents 8778 common getdents sys_getdents
8879 64 getcwd sys_getcwd 8879 common getcwd sys_getcwd
8980 64 chdir sys_chdir 8980 common chdir sys_chdir
9081 64 fchdir sys_fchdir 9081 common fchdir sys_fchdir
9182 64 rename sys_rename 9182 common rename sys_rename
9283 64 mkdir sys_mkdir 9283 common mkdir sys_mkdir
9384 64 rmdir sys_rmdir 9384 common rmdir sys_rmdir
9485 64 creat sys_creat 9485 common creat sys_creat
9586 64 link sys_link 9586 common link sys_link
9687 64 unlink sys_unlink 9687 common unlink sys_unlink
9788 64 symlink sys_symlink 9788 common symlink sys_symlink
9889 64 readlink sys_readlink 9889 common readlink sys_readlink
9990 64 chmod sys_chmod 9990 common chmod sys_chmod
10091 64 fchmod sys_fchmod 10091 common fchmod sys_fchmod
10192 64 chown sys_chown 10192 common chown sys_chown
10293 64 fchown sys_fchown 10293 common fchown sys_fchown
10394 64 lchown sys_lchown 10394 common lchown sys_lchown
10495 64 umask sys_umask 10495 common umask sys_umask
10596 64 gettimeofday sys_gettimeofday 10596 common gettimeofday sys_gettimeofday
10697 64 getrlimit sys_getrlimit 10697 common getrlimit sys_getrlimit
10798 64 getrusage sys_getrusage 10798 common getrusage sys_getrusage
10899 64 sysinfo sys_sysinfo 10899 common sysinfo sys_sysinfo
109100 64 times sys_times 109100 common times sys_times
110101 64 ptrace sys_ptrace 110101 64 ptrace sys_ptrace
111102 64 getuid sys_getuid 111102 common getuid sys_getuid
112103 64 syslog sys_syslog 112103 common syslog sys_syslog
113104 64 getgid sys_getgid 113104 common getgid sys_getgid
114105 64 setuid sys_setuid 114105 common setuid sys_setuid
115106 64 setgid sys_setgid 115106 common setgid sys_setgid
116107 64 geteuid sys_geteuid 116107 common geteuid sys_geteuid
117108 64 getegid sys_getegid 117108 common getegid sys_getegid
118109 64 setpgid sys_setpgid 118109 common setpgid sys_setpgid
119110 64 getppid sys_getppid 119110 common getppid sys_getppid
120111 64 getpgrp sys_getpgrp 120111 common getpgrp sys_getpgrp
121112 64 setsid sys_setsid 121112 common setsid sys_setsid
122113 64 setreuid sys_setreuid 122113 common setreuid sys_setreuid
123114 64 setregid sys_setregid 123114 common setregid sys_setregid
124115 64 getgroups sys_getgroups 124115 common getgroups sys_getgroups
125116 64 setgroups sys_setgroups 125116 common setgroups sys_setgroups
126117 64 setresuid sys_setresuid 126117 common setresuid sys_setresuid
127118 64 getresuid sys_getresuid 127118 common getresuid sys_getresuid
128119 64 setresgid sys_setresgid 128119 common setresgid sys_setresgid
129120 64 getresgid sys_getresgid 129120 common getresgid sys_getresgid
130121 64 getpgid sys_getpgid 130121 common getpgid sys_getpgid
131122 64 setfsuid sys_setfsuid 131122 common setfsuid sys_setfsuid
132123 64 setfsgid sys_setfsgid 132123 common setfsgid sys_setfsgid
133124 64 getsid sys_getsid 133124 common getsid sys_getsid
134125 64 capget sys_capget 134125 common capget sys_capget
135126 64 capset sys_capset 135126 common capset sys_capset
136127 64 rt_sigpending sys_rt_sigpending 136127 64 rt_sigpending sys_rt_sigpending
137128 64 rt_sigtimedwait sys_rt_sigtimedwait 137128 64 rt_sigtimedwait sys_rt_sigtimedwait
138129 64 rt_sigqueueinfo sys_rt_sigqueueinfo 138129 64 rt_sigqueueinfo sys_rt_sigqueueinfo
139130 64 rt_sigsuspend sys_rt_sigsuspend 139130 common rt_sigsuspend sys_rt_sigsuspend
140131 64 sigaltstack stub_sigaltstack 140131 64 sigaltstack stub_sigaltstack
141132 64 utime sys_utime 141132 common utime sys_utime
142133 64 mknod sys_mknod 142133 common mknod sys_mknod
143134 64 uselib 143134 64 uselib
144135 64 personality sys_personality 144135 common personality sys_personality
145136 64 ustat sys_ustat 145136 common ustat sys_ustat
146137 64 statfs sys_statfs 146137 common statfs sys_statfs
147138 64 fstatfs sys_fstatfs 147138 common fstatfs sys_fstatfs
148139 64 sysfs sys_sysfs 148139 common sysfs sys_sysfs
149140 64 getpriority sys_getpriority 149140 common getpriority sys_getpriority
150141 64 setpriority sys_setpriority 150141 common setpriority sys_setpriority
151142 64 sched_setparam sys_sched_setparam 151142 common sched_setparam sys_sched_setparam
152143 64 sched_getparam sys_sched_getparam 152143 common sched_getparam sys_sched_getparam
153144 64 sched_setscheduler sys_sched_setscheduler 153144 common sched_setscheduler sys_sched_setscheduler
154145 64 sched_getscheduler sys_sched_getscheduler 154145 common sched_getscheduler sys_sched_getscheduler
155146 64 sched_get_priority_max sys_sched_get_priority_max 155146 common sched_get_priority_max sys_sched_get_priority_max
156147 64 sched_get_priority_min sys_sched_get_priority_min 156147 common sched_get_priority_min sys_sched_get_priority_min
157148 64 sched_rr_get_interval sys_sched_rr_get_interval 157148 common sched_rr_get_interval sys_sched_rr_get_interval
158149 64 mlock sys_mlock 158149 common mlock sys_mlock
159150 64 munlock sys_munlock 159150 common munlock sys_munlock
160151 64 mlockall sys_mlockall 160151 common mlockall sys_mlockall
161152 64 munlockall sys_munlockall 161152 common munlockall sys_munlockall
162153 64 vhangup sys_vhangup 162153 common vhangup sys_vhangup
163154 64 modify_ldt sys_modify_ldt 163154 common modify_ldt sys_modify_ldt
164155 64 pivot_root sys_pivot_root 164155 common pivot_root sys_pivot_root
165156 64 _sysctl sys_sysctl 165156 64 _sysctl sys_sysctl
166157 64 prctl sys_prctl 166157 common prctl sys_prctl
167158 64 arch_prctl sys_arch_prctl 167158 common arch_prctl sys_arch_prctl
168159 64 adjtimex sys_adjtimex 168159 common adjtimex sys_adjtimex
169160 64 setrlimit sys_setrlimit 169160 common setrlimit sys_setrlimit
170161 64 chroot sys_chroot 170161 common chroot sys_chroot
171162 64 sync sys_sync 171162 common sync sys_sync
172163 64 acct sys_acct 172163 common acct sys_acct
173164 64 settimeofday sys_settimeofday 173164 common settimeofday sys_settimeofday
174165 64 mount sys_mount 174165 common mount sys_mount
175166 64 umount2 sys_umount 175166 common umount2 sys_umount
176167 64 swapon sys_swapon 176167 common swapon sys_swapon
177168 64 swapoff sys_swapoff 177168 common swapoff sys_swapoff
178169 64 reboot sys_reboot 178169 common reboot sys_reboot
179170 64 sethostname sys_sethostname 179170 common sethostname sys_sethostname
180171 64 setdomainname sys_setdomainname 180171 common setdomainname sys_setdomainname
181172 64 iopl stub_iopl 181172 common iopl stub_iopl
182173 64 ioperm sys_ioperm 182173 common ioperm sys_ioperm
183174 64 create_module 183174 64 create_module
184175 64 init_module sys_init_module 184175 common init_module sys_init_module
185176 64 delete_module sys_delete_module 185176 common delete_module sys_delete_module
186177 64 get_kernel_syms 186177 64 get_kernel_syms
187178 64 query_module 187178 64 query_module
188179 64 quotactl sys_quotactl 188179 common quotactl sys_quotactl
189180 64 nfsservctl 189180 64 nfsservctl
190181 64 getpmsg 190181 common getpmsg
191182 64 putpmsg 191182 common putpmsg
192183 64 afs_syscall 192183 common afs_syscall
193184 64 tuxcall 193184 common tuxcall
194185 64 security 194185 common security
195186 64 gettid sys_gettid 195186 common gettid sys_gettid
196187 64 readahead sys_readahead 196187 common readahead sys_readahead
197188 64 setxattr sys_setxattr 197188 common setxattr sys_setxattr
198189 64 lsetxattr sys_lsetxattr 198189 common lsetxattr sys_lsetxattr
199190 64 fsetxattr sys_fsetxattr 199190 common fsetxattr sys_fsetxattr
200191 64 getxattr sys_getxattr 200191 common getxattr sys_getxattr
201192 64 lgetxattr sys_lgetxattr 201192 common lgetxattr sys_lgetxattr
202193 64 fgetxattr sys_fgetxattr 202193 common fgetxattr sys_fgetxattr
203194 64 listxattr sys_listxattr 203194 common listxattr sys_listxattr
204195 64 llistxattr sys_llistxattr 204195 common llistxattr sys_llistxattr
205196 64 flistxattr sys_flistxattr 205196 common flistxattr sys_flistxattr
206197 64 removexattr sys_removexattr 206197 common removexattr sys_removexattr
207198 64 lremovexattr sys_lremovexattr 207198 common lremovexattr sys_lremovexattr
208199 64 fremovexattr sys_fremovexattr 208199 common fremovexattr sys_fremovexattr
209200 64 tkill sys_tkill 209200 common tkill sys_tkill
210201 64 time sys_time 210201 common time sys_time
211202 64 futex sys_futex 211202 common futex sys_futex
212203 64 sched_setaffinity sys_sched_setaffinity 212203 common sched_setaffinity sys_sched_setaffinity
213204 64 sched_getaffinity sys_sched_getaffinity 213204 common sched_getaffinity sys_sched_getaffinity
214205 64 set_thread_area 214205 64 set_thread_area
215206 64 io_setup sys_io_setup 215206 common io_setup sys_io_setup
216207 64 io_destroy sys_io_destroy 216207 common io_destroy sys_io_destroy
217208 64 io_getevents sys_io_getevents 217208 common io_getevents sys_io_getevents
218209 64 io_submit sys_io_submit 218209 common io_submit sys_io_submit
219210 64 io_cancel sys_io_cancel 219210 common io_cancel sys_io_cancel
220211 64 get_thread_area 220211 64 get_thread_area
221212 64 lookup_dcookie sys_lookup_dcookie 221212 common lookup_dcookie sys_lookup_dcookie
222213 64 epoll_create sys_epoll_create 222213 common epoll_create sys_epoll_create
223214 64 epoll_ctl_old 223214 64 epoll_ctl_old
224215 64 epoll_wait_old 224215 64 epoll_wait_old
225216 64 remap_file_pages sys_remap_file_pages 225216 common remap_file_pages sys_remap_file_pages
226217 64 getdents64 sys_getdents64 226217 common getdents64 sys_getdents64
227218 64 set_tid_address sys_set_tid_address 227218 common set_tid_address sys_set_tid_address
228219 64 restart_syscall sys_restart_syscall 228219 common restart_syscall sys_restart_syscall
229220 64 semtimedop sys_semtimedop 229220 common semtimedop sys_semtimedop
230221 64 fadvise64 sys_fadvise64 230221 common fadvise64 sys_fadvise64
231222 64 timer_create sys_timer_create 231222 64 timer_create sys_timer_create
232223 64 timer_settime sys_timer_settime 232223 common timer_settime sys_timer_settime
233224 64 timer_gettime sys_timer_gettime 233224 common timer_gettime sys_timer_gettime
234225 64 timer_getoverrun sys_timer_getoverrun 234225 common timer_getoverrun sys_timer_getoverrun
235226 64 timer_delete sys_timer_delete 235226 common timer_delete sys_timer_delete
236227 64 clock_settime sys_clock_settime 236227 common clock_settime sys_clock_settime
237228 64 clock_gettime sys_clock_gettime 237228 common clock_gettime sys_clock_gettime
238229 64 clock_getres sys_clock_getres 238229 common clock_getres sys_clock_getres
239230 64 clock_nanosleep sys_clock_nanosleep 239230 common clock_nanosleep sys_clock_nanosleep
240231 64 exit_group sys_exit_group 240231 common exit_group sys_exit_group
241232 64 epoll_wait sys_epoll_wait 241232 common epoll_wait sys_epoll_wait
242233 64 epoll_ctl sys_epoll_ctl 242233 common epoll_ctl sys_epoll_ctl
243234 64 tgkill sys_tgkill 243234 common tgkill sys_tgkill
244235 64 utimes sys_utimes 244235 common utimes sys_utimes
245236 64 vserver 245236 64 vserver
246237 64 mbind sys_mbind 246237 common mbind sys_mbind
247238 64 set_mempolicy sys_set_mempolicy 247238 common set_mempolicy sys_set_mempolicy
248239 64 get_mempolicy sys_get_mempolicy 248239 common get_mempolicy sys_get_mempolicy
249240 64 mq_open sys_mq_open 249240 common mq_open sys_mq_open
250241 64 mq_unlink sys_mq_unlink 250241 common mq_unlink sys_mq_unlink
251242 64 mq_timedsend sys_mq_timedsend 251242 common mq_timedsend sys_mq_timedsend
252243 64 mq_timedreceive sys_mq_timedreceive 252243 common mq_timedreceive sys_mq_timedreceive
253244 64 mq_notify sys_mq_notify 253244 64 mq_notify sys_mq_notify
254245 64 mq_getsetattr sys_mq_getsetattr 254245 common mq_getsetattr sys_mq_getsetattr
255246 64 kexec_load sys_kexec_load 255246 64 kexec_load sys_kexec_load
256247 64 waitid sys_waitid 256247 64 waitid sys_waitid
257248 64 add_key sys_add_key 257248 common add_key sys_add_key
258249 64 request_key sys_request_key 258249 common request_key sys_request_key
259250 64 keyctl sys_keyctl 259250 common keyctl sys_keyctl
260251 64 ioprio_set sys_ioprio_set 260251 common ioprio_set sys_ioprio_set
261252 64 ioprio_get sys_ioprio_get 261252 common ioprio_get sys_ioprio_get
262253 64 inotify_init sys_inotify_init 262253 common inotify_init sys_inotify_init
263254 64 inotify_add_watch sys_inotify_add_watch 263254 common inotify_add_watch sys_inotify_add_watch
264255 64 inotify_rm_watch sys_inotify_rm_watch 264255 common inotify_rm_watch sys_inotify_rm_watch
265256 64 migrate_pages sys_migrate_pages 265256 common migrate_pages sys_migrate_pages
266257 64 openat sys_openat 266257 common openat sys_openat
267258 64 mkdirat sys_mkdirat 267258 common mkdirat sys_mkdirat
268259 64 mknodat sys_mknodat 268259 common mknodat sys_mknodat
269260 64 fchownat sys_fchownat 269260 common fchownat sys_fchownat
270261 64 futimesat sys_futimesat 270261 common futimesat sys_futimesat
271262 64 newfstatat sys_newfstatat 271262 common newfstatat sys_newfstatat
272263 64 unlinkat sys_unlinkat 272263 common unlinkat sys_unlinkat
273264 64 renameat sys_renameat 273264 common renameat sys_renameat
274265 64 linkat sys_linkat 274265 common linkat sys_linkat
275266 64 symlinkat sys_symlinkat 275266 common symlinkat sys_symlinkat
276267 64 readlinkat sys_readlinkat 276267 common readlinkat sys_readlinkat
277268 64 fchmodat sys_fchmodat 277268 common fchmodat sys_fchmodat
278269 64 faccessat sys_faccessat 278269 common faccessat sys_faccessat
279270 64 pselect6 sys_pselect6 279270 common pselect6 sys_pselect6
280271 64 ppoll sys_ppoll 280271 common ppoll sys_ppoll
281272 64 unshare sys_unshare 281272 common unshare sys_unshare
282273 64 set_robust_list sys_set_robust_list 282273 64 set_robust_list sys_set_robust_list
283274 64 get_robust_list sys_get_robust_list 283274 64 get_robust_list sys_get_robust_list
284275 64 splice sys_splice 284275 common splice sys_splice
285276 64 tee sys_tee 285276 common tee sys_tee
286277 64 sync_file_range sys_sync_file_range 286277 common sync_file_range sys_sync_file_range
287278 64 vmsplice sys_vmsplice 287278 64 vmsplice sys_vmsplice
288279 64 move_pages sys_move_pages 288279 64 move_pages sys_move_pages
289280 64 utimensat sys_utimensat 289280 common utimensat sys_utimensat
290281 64 epoll_pwait sys_epoll_pwait 290281 common epoll_pwait sys_epoll_pwait
291282 64 signalfd sys_signalfd 291282 common signalfd sys_signalfd
292283 64 timerfd_create sys_timerfd_create 292283 common timerfd_create sys_timerfd_create
293284 64 eventfd sys_eventfd 293284 common eventfd sys_eventfd
294285 64 fallocate sys_fallocate 294285 common fallocate sys_fallocate
295286 64 timerfd_settime sys_timerfd_settime 295286 common timerfd_settime sys_timerfd_settime
296287 64 timerfd_gettime sys_timerfd_gettime 296287 common timerfd_gettime sys_timerfd_gettime
297288 64 accept4 sys_accept4 297288 common accept4 sys_accept4
298289 64 signalfd4 sys_signalfd4 298289 common signalfd4 sys_signalfd4
299290 64 eventfd2 sys_eventfd2 299290 common eventfd2 sys_eventfd2
300291 64 epoll_create1 sys_epoll_create1 300291 common epoll_create1 sys_epoll_create1
301292 64 dup3 sys_dup3 301292 common dup3 sys_dup3
302293 64 pipe2 sys_pipe2 302293 common pipe2 sys_pipe2
303294 64 inotify_init1 sys_inotify_init1 303294 common inotify_init1 sys_inotify_init1
304295 64 preadv sys_preadv 304295 64 preadv sys_preadv
305296 64 pwritev sys_pwritev 305296 64 pwritev sys_pwritev
306297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo 306297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
307298 64 perf_event_open sys_perf_event_open 307298 common perf_event_open sys_perf_event_open
308299 64 recvmmsg sys_recvmmsg 308299 64 recvmmsg sys_recvmmsg
309300 64 fanotify_init sys_fanotify_init 309300 common fanotify_init sys_fanotify_init
310301 64 fanotify_mark sys_fanotify_mark 310301 common fanotify_mark sys_fanotify_mark
311302 64 prlimit64 sys_prlimit64 311302 common prlimit64 sys_prlimit64
312303 64 name_to_handle_at sys_name_to_handle_at 312303 common name_to_handle_at sys_name_to_handle_at
313304 64 open_by_handle_at sys_open_by_handle_at 313304 common open_by_handle_at sys_open_by_handle_at
314305 64 clock_adjtime sys_clock_adjtime 314305 common clock_adjtime sys_clock_adjtime
315306 64 syncfs sys_syncfs 315306 common syncfs sys_syncfs
316307 64 sendmmsg sys_sendmmsg 316307 64 sendmmsg sys_sendmmsg
317308 64 setns sys_setns 317308 common setns sys_setns
318309 64 getcpu sys_getcpu 318309 common getcpu sys_getcpu
319310 64 process_vm_readv sys_process_vm_readv 319310 64 process_vm_readv sys_process_vm_readv
320311 64 process_vm_writev sys_process_vm_writev 320311 64 process_vm_writev sys_process_vm_writev
321#
322# x32-specific system call numbers start at 512 to avoid cache impact
323# for native 64-bit operation.
324#
325512 x32 rt_sigaction sys32_rt_sigaction
326513 x32 rt_sigreturn stub_x32_rt_sigreturn
327514 x32 ioctl compat_sys_ioctl
328515 x32 readv compat_sys_readv
329516 x32 writev compat_sys_writev
330517 x32 recvfrom compat_sys_recvfrom
331518 x32 sendmsg compat_sys_sendmsg
332519 x32 recvmsg compat_sys_recvmsg
333520 x32 execve stub_x32_execve
334521 x32 ptrace compat_sys_ptrace
335522 x32 rt_sigpending sys32_rt_sigpending
336523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait
337524 x32 rt_sigqueueinfo sys32_rt_sigqueueinfo
338525 x32 sigaltstack stub_x32_sigaltstack
339526 x32 timer_create compat_sys_timer_create
340527 x32 mq_notify compat_sys_mq_notify
341528 x32 kexec_load compat_sys_kexec_load
342529 x32 waitid compat_sys_waitid
343530 x32 set_robust_list compat_sys_set_robust_list
344531 x32 get_robust_list compat_sys_get_robust_list
345532 x32 vmsplice compat_sys_vmsplice
346533 x32 move_pages compat_sys_move_pages
347534 x32 preadv compat_sys_preadv64
348535 x32 pwritev compat_sys_pwritev64
349536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
350537 x32 recvmmsg compat_sys_recvmmsg
351538 x32 sendmmsg compat_sys_sendmmsg
352539 x32 process_vm_readv compat_sys_process_vm_readv
353540 x32 process_vm_writev compat_sys_process_vm_writev
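
The table above only lists the in-table numbers; an x32 process additionally sets __X32_SYSCALL_BIT (0x40000000) in every syscall number, for the common entries as well as the 512+ range. A minimal sketch, assuming a kernel built with CONFIG_X86_X32_ABI:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define X32_SYSCALL_BIT 0x40000000L	/* __X32_SYSCALL_BIT */

int main(void)
{
	/* getpid is "common" entry 39 above, so the x32 number is
	 * 0x40000000 + 39; a non-x32 kernel returns -ENOSYS instead. */
	long pid = syscall(X32_SYSCALL_BIT + 39);

	printf("getpid via the x32 ABI: %ld\n", pid);
	return 0;
}
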
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index fe626c3ba01..9924776f426 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -35,6 +35,9 @@
35#define stub_sigaltstack sys_sigaltstack 35#define stub_sigaltstack sys_sigaltstack
36#define stub_rt_sigreturn sys_rt_sigreturn 36#define stub_rt_sigreturn sys_rt_sigreturn
37 37
38#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
39#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
40
38#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; 41#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
39#include <asm/syscalls_64.h> 42#include <asm/syscalls_64.h>
40 43
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index 5edf4f4bbf5..ce7e3607a87 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -15,6 +15,8 @@ static char syscalls[] = {
15}; 15};
16#else 16#else
17#define __SYSCALL_64(nr, sym, compat) [nr] = 1, 17#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
18#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
19#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
18static char syscalls[] = { 20static char syscalls[] = {
19#include <asm/syscalls_64.h> 21#include <asm/syscalls_64.h>
20}; 22};
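
Both UML hunks lean on the same X-macro trick: the generated <asm/syscalls_64.h> is just a list of __SYSCALL_64/__SYSCALL_COMMON/__SYSCALL_X32 invocations, and each consumer defines those macros before including it. A compilable miniature (SYSCALL_LIST stands in for the generated header; the entries are hypothetical):

#define SYSCALL_LIST \
	__SYSCALL_COMMON(0, sys_read, sys_read) \
	__SYSCALL_64(19, sys_readv, compat_sys_readv) \
	__SYSCALL_X32(515, compat_sys_writev, compat_sys_writev)

/* user-offsets.c wants a presence map and no x32 entries: */
#define __SYSCALL_64(nr, sym, compat)     [nr] = 1,
#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
#define __SYSCALL_X32(nr, sym, compat)    /* not supported */

static char syscalls[] = { SYSCALL_LIST };
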
diff --git a/arch/x86/vdso/.gitignore b/arch/x86/vdso/.gitignore
index 60274d5746e..3282874bc61 100644
--- a/arch/x86/vdso/.gitignore
+++ b/arch/x86/vdso/.gitignore
@@ -1,5 +1,7 @@
1vdso.lds 1vdso.lds
2vdso-syms.lds 2vdso-syms.lds
3vdsox32.lds
4vdsox32-syms.lds
3vdso32-syms.lds 5vdso32-syms.lds
4vdso32-syscall-syms.lds 6vdso32-syscall-syms.lds
5vdso32-sysenter-syms.lds 7vdso32-sysenter-syms.lds
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 5d179502a52..fd14be1d147 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -3,21 +3,29 @@
3# 3#
4 4
5VDSO64-$(CONFIG_X86_64) := y 5VDSO64-$(CONFIG_X86_64) := y
6VDSOX32-$(CONFIG_X86_X32_ABI) := y
6VDSO32-$(CONFIG_X86_32) := y 7VDSO32-$(CONFIG_X86_32) := y
7VDSO32-$(CONFIG_COMPAT) := y 8VDSO32-$(CONFIG_COMPAT) := y
8 9
9vdso-install-$(VDSO64-y) += vdso.so 10vdso-install-$(VDSO64-y) += vdso.so
11vdso-install-$(VDSOX32-y) += vdsox32.so
10vdso-install-$(VDSO32-y) += $(vdso32-images) 12vdso-install-$(VDSO32-y) += $(vdso32-images)
11 13
12 14
13# files to link into the vdso 15# files to link into the vdso
14vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o 16vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
15 17
18vobjs-$(VDSOX32-y) += $(vobjx32s-compat)
19
20# Filter out x32 objects.
21vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y))
22
16# files to link into kernel 23# files to link into kernel
17obj-$(VDSO64-y) += vma.o vdso.o 24obj-$(VDSO64-y) += vma.o vdso.o
25obj-$(VDSOX32-y) += vdsox32.o
18obj-$(VDSO32-y) += vdso32.o vdso32-setup.o 26obj-$(VDSO32-y) += vdso32.o vdso32-setup.o
19 27
20vobjs := $(foreach F,$(vobjs-y),$(obj)/$F) 28vobjs := $(foreach F,$(vobj64s),$(obj)/$F)
21 29
22$(obj)/vdso.o: $(obj)/vdso.so 30$(obj)/vdso.o: $(obj)/vdso.so
23 31
@@ -73,6 +81,42 @@ $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
73 $(call if_changed,vdsosym) 81 $(call if_changed,vdsosym)
74 82
75# 83#
 84 # X32 processes use the x32 vDSO to access 64-bit kernel data.
 85 #
 86 # Build the x32 vDSO image:
 87 # 1. Compile the x32 vDSO as 64-bit.
 88 # 2. Convert the object files to x32.
 89 # 3. Build the x32 vDSO image from the x32 objects, which contain 64-bit
 90 #    code so that it can reach the 64-bit address space with 64-bit pointers.
91#
92
93targets += vdsox32-syms.lds
94obj-$(VDSOX32-y) += vdsox32-syms.lds
95
96CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
97VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
98 -Wl,-soname=linux-vdso.so.1 \
99 -Wl,-z,max-page-size=4096 \
100 -Wl,-z,common-page-size=4096
101
102vobjx32s-y := $(vobj64s:.o=-x32.o)
103vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F)
104
105# Convert 64bit object file to x32 for x32 vDSO.
106quiet_cmd_x32 = X32 $@
107 cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@
108
109$(obj)/%-x32.o: $(obj)/%.o FORCE
110 $(call if_changed,x32)
111
112targets += vdsox32.so vdsox32.so.dbg vdsox32.lds $(vobjx32s-y)
113
114$(obj)/vdsox32.o: $(src)/vdsox32.S $(obj)/vdsox32.so
115
116$(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
117 $(call if_changed,vdso)
118
119#
76# Build multiple 32-bit vDSO images to choose from at boot time. 120# Build multiple 32-bit vDSO images to choose from at boot time.
77# 121#
78obj-$(VDSO32-y) += vdso32-syms.lds 122obj-$(VDSO32-y) += vdso32-syms.lds
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 468d591dde3..10f9f59477d 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -317,6 +317,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
317 int ret = 0; 317 int ret = 0;
318 bool compat; 318 bool compat;
319 319
320#ifdef CONFIG_X86_X32_ABI
321 if (test_thread_flag(TIF_X32))
322 return x32_setup_additional_pages(bprm, uses_interp);
323#endif
324
320 if (vdso_enabled == VDSO_DISABLED) 325 if (vdso_enabled == VDSO_DISABLED)
321 return 0; 326 return 0;
322 327
diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S
new file mode 100644
index 00000000000..d6b9a7f42a8
--- /dev/null
+++ b/arch/x86/vdso/vdsox32.S
@@ -0,0 +1,22 @@
1#include <asm/page_types.h>
2#include <linux/linkage.h>
3#include <linux/init.h>
4
5__PAGE_ALIGNED_DATA
6
7 .globl vdsox32_start, vdsox32_end
8 .align PAGE_SIZE
9vdsox32_start:
10 .incbin "arch/x86/vdso/vdsox32.so"
11vdsox32_end:
12 .align PAGE_SIZE /* extra data here leaks to userspace. */
13
14.previous
15
16 .globl vdsox32_pages
17 .bss
18 .align 8
19 .type vdsox32_pages, @object
20vdsox32_pages:
21 .zero (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
22 .size vdsox32_pages, .-vdsox32_pages
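
The .zero expression above reserves one 8-byte struct page pointer per image page, using the usual round-up division. The same arithmetic in C, with a hypothetical image size:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long image_len = 6500;	/* hypothetical vdsox32.so size */
	unsigned long npages = (image_len + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("%lu page(s) -> %lu bytes of page pointers\n",
	       npages, npages * 8);	/* 2 page(s) -> 16 bytes */
	return 0;
}
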
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/vdso/vdsox32.lds.S
new file mode 100644
index 00000000000..62272aa2ae0
--- /dev/null
+++ b/arch/x86/vdso/vdsox32.lds.S
@@ -0,0 +1,28 @@
+/*
+ * Linker script for the x32 vDSO.
+ * We #include the file to define the layout details.
+ * Here we only choose the prelinked virtual address.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.  We can define local symbols here called VDSO* to make their
+ * values visible using the asm-x86/vdso.h macros from the kernel proper.
+ */
+
+#define VDSO_PRELINK 0
+#include "vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+        LINUX_2.6 {
+        global:
+                __vdso_clock_gettime;
+                __vdso_gettimeofday;
+                __vdso_getcpu;
+                __vdso_time;
+        local: *;
+        };
+}
+
+VDSOX32_PRELINK = VDSO_PRELINK;
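Once the image built from this script is mapped by the kernel, a process finds
it through the AT_SYSINFO_EHDR auxiliary-vector entry rather than a file on
disk; the exported __vdso_* symbols are then resolved by the C library. A small
sketch of the lookup side, assuming a glibc that provides getauxval() (added in
glibc 2.16, later than this patch):

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        /* The kernel hands the vDSO base to every new process via auxv. */
        const unsigned char *ehdr =
                (const unsigned char *)getauxval(AT_SYSINFO_EHDR);

        if (!ehdr)
                return 1;
        /* For an x32 task this image is ELFCLASS32, per vdsox32.lds.S. */
        printf("vDSO mapped at %p, class %s\n", (const void *)ehdr,
               ehdr[EI_CLASS] == ELFCLASS32 ? "ELF32" : "ELF64");
        return 0;
}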
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 153407c35b7..d7dce1dbf8c 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -24,7 +24,44 @@ extern unsigned short vdso_sync_cpuid;
 extern struct page *vdso_pages[];
 static unsigned vdso_size;
 
-static void __init patch_vdso(void *vdso, size_t len)
+#ifdef CONFIG_X86_X32_ABI
+extern char vdsox32_start[], vdsox32_end[];
+extern struct page *vdsox32_pages[];
+static unsigned vdsox32_size;
+
+static void __init patch_vdsox32(void *vdso, size_t len)
+{
+        Elf32_Ehdr *hdr = vdso;
+        Elf32_Shdr *sechdrs, *alt_sec = 0;
+        char *secstrings;
+        void *alt_data;
+        int i;
+
+        BUG_ON(len < sizeof(Elf32_Ehdr));
+        BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
+
+        sechdrs = (void *)hdr + hdr->e_shoff;
+        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+        for (i = 1; i < hdr->e_shnum; i++) {
+                Elf32_Shdr *shdr = &sechdrs[i];
+                if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
+                        alt_sec = shdr;
+                        goto found;
+                }
+        }
+
+        /* If we get here, it's probably a bug. */
+        pr_warning("patch_vdsox32: .altinstructions not found\n");
+        return; /* nothing to patch */
+
+found:
+        alt_data = (void *)hdr + alt_sec->sh_offset;
+        apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
+}
+#endif
+
+static void __init patch_vdso64(void *vdso, size_t len)
 {
         Elf64_Ehdr *hdr = vdso;
         Elf64_Shdr *sechdrs, *alt_sec = 0;
@@ -47,7 +84,7 @@ static void __init patch_vdso(void *vdso, size_t len)
         }
 
         /* If we get here, it's probably a bug. */
-        pr_warning("patch_vdso: .altinstructions not found\n");
+        pr_warning("patch_vdso64: .altinstructions not found\n");
         return; /* nothing to patch */
 
 found:
@@ -60,12 +97,20 @@ static int __init init_vdso(void)
         int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
         int i;
 
-        patch_vdso(vdso_start, vdso_end - vdso_start);
+        patch_vdso64(vdso_start, vdso_end - vdso_start);
 
         vdso_size = npages << PAGE_SHIFT;
         for (i = 0; i < npages; i++)
                 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
 
+#ifdef CONFIG_X86_X32_ABI
+        patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
+        npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
+        vdsox32_size = npages << PAGE_SHIFT;
+        for (i = 0; i < npages; i++)
+                vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
+#endif
+
         return 0;
 }
 subsys_initcall(init_vdso);
@@ -103,7 +148,10 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 
 /* Setup a VMA at program startup for the vsyscall page.
    Not called for compat tasks */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+static int setup_additional_pages(struct linux_binprm *bprm,
+                                  int uses_interp,
+                                  struct page **pages,
+                                  unsigned size)
 {
         struct mm_struct *mm = current->mm;
         unsigned long addr;
@@ -113,8 +161,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
                 return 0;
 
         down_write(&mm->mmap_sem);
-        addr = vdso_addr(mm->start_stack, vdso_size);
-        addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
+        addr = vdso_addr(mm->start_stack, size);
+        addr = get_unmapped_area(NULL, addr, size, 0, 0);
         if (IS_ERR_VALUE(addr)) {
                 ret = addr;
                 goto up_fail;
@@ -122,11 +170,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
         current->mm->context.vdso = (void *)addr;
 
-        ret = install_special_mapping(mm, addr, vdso_size,
+        ret = install_special_mapping(mm, addr, size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                       VM_ALWAYSDUMP,
-                                      vdso_pages);
+                                      pages);
         if (ret) {
                 current->mm->context.vdso = NULL;
                 goto up_fail;
@@ -137,6 +185,20 @@ up_fail:
         return ret;
 }
 
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+        return setup_additional_pages(bprm, uses_interp, vdso_pages,
+                                      vdso_size);
+}
+
+#ifdef CONFIG_X86_X32_ABI
+int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+        return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
+                                      vdsox32_size);
+}
+#endif
+
 static __init int vdso_setup(char *s)
 {
         vdso_enabled = simple_strtoul(s, NULL, 0);
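patch_vdsox32() above is a straight ELF section walk: offset to the section
header table, index the string table through e_shstrndx, then compare names.
The same loop works unchanged on a user-space copy of an ELF32 image. A sketch
follows; find_section() is a hypothetical helper, and the kernel version goes
on to run apply_alternatives() on the section it finds:

#include <elf.h>
#include <string.h>

/* Find a named section in a fully mapped ELF32 image
 * (GNU void-pointer arithmetic, as in the kernel source). */
void *find_section(void *img, const char *name, Elf32_Word *size)
{
        Elf32_Ehdr *hdr = img;
        Elf32_Shdr *sechdrs = (void *)hdr + hdr->e_shoff;
        char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
        int i;

        for (i = 1; i < hdr->e_shnum; i++) {    /* index 0 is the null section */
                Elf32_Shdr *shdr = &sechdrs[i];

                if (!strcmp(secstrings + shdr->sh_name, name)) {
                        *size = shdr->sh_size;
                        return (void *)hdr + shdr->sh_offset;
                }
        }
        return NULL;    /* caller treats a missing ".altinstructions" as a bug */
}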
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 12eb07bfb26..4172af8ceeb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1141,7 +1141,9 @@ asmlinkage void __init xen_start_kernel(void)
 
         /* Prevent unwanted bits from being set in PTEs. */
         __supported_pte_mask &= ~_PAGE_GLOBAL;
+#if 0
         if (!xen_initial_domain())
+#endif
                 __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
 
         __supported_pte_mask |= _PAGE_IOMAP;
@@ -1204,10 +1206,6 @@ asmlinkage void __init xen_start_kernel(void)
 
         pgd = (pgd_t *)xen_start_info->pt_base;
 
-        if (!xen_initial_domain())
-                __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
-
-        __supported_pte_mask |= _PAGE_IOMAP;
         /* Don't do the full vcpu_info placement stuff until we have a
            possible map and a non-dummy shared_info. */
         per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
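Both enlighten.c hunks touch the same mechanism: clearing _PAGE_PWT and
_PAGE_PCD from __supported_pte_mask strips the cache-control bits from every
PTE the kernel builds afterwards. A stand-alone illustration of that masking,
using the x86 bit positions (PWT is bit 3, PCD is bit 4); the sample pte value
below is made up:

#include <stdio.h>

#define _PAGE_PWT       (1UL << 3)      /* write-through */
#define _PAGE_PCD       (1UL << 4)      /* cache disable */

int main(void)
{
        unsigned long supported_pte_mask = ~0UL;
        unsigned long pte = 0x1000 | _PAGE_PCD | 0x1;   /* uncached request */

        /* what "__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD)" causes: */
        supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
        printf("pte %#lx -> %#lx (PCD filtered out)\n",
               pte, pte & supported_pte_mask);
        return 0;
}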
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 58a0e46c404..95c1cf60c66 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -415,13 +415,13 @@ static pteval_t iomap_pte(pteval_t val)
 static pteval_t xen_pte_val(pte_t pte)
 {
         pteval_t pteval = pte.pte;
-
+#if 0
         /* If this is a WC pte, convert back from Xen WC to Linux WC */
         if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
                 WARN_ON(!pat_enabled);
                 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
         }
-
+#endif
         if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
                 return pteval;
 
@@ -463,7 +463,7 @@ void xen_set_pat(u64 pat)
 static pte_t xen_make_pte(pteval_t pte)
 {
         phys_addr_t addr = (pte & PTE_PFN_MASK);
-
+#if 0
         /* If Linux is trying to set a WC pte, then map to the Xen WC.
          * If _PAGE_PAT is set, then it probably means it is really
          * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
@@ -476,7 +476,7 @@ static pte_t xen_make_pte(pteval_t pte)
         if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
                 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
         }
-
+#endif
         /*
          * Unprivileged domains are allowed to do IOMAPpings for
          * PCI passthrough, but not map ISA space.  The ISA
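The two #if 0 blocks in mmu.c disable a symmetric translation: in this scheme
Linux marks a write-combining pte with _PAGE_PWT while Xen expects _PAGE_PAT,
so xen_make_pte() swapped one encoding for the other and xen_pte_val() swapped
it back. The pair, lifted into plain C exactly as the disabled code does it
(bit values as in arch/x86; the function names are illustrative):

#define _PAGE_PWT       (1UL << 3)
#define _PAGE_PCD       (1UL << 4)
#define _PAGE_PAT       (1UL << 7)

/* xen_make_pte() direction: Linux WC (PWT set, PCD clear) -> Xen WC (PAT) */
unsigned long linux_wc_to_xen(unsigned long pte)
{
        if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
                pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
        return pte;
}

/* xen_pte_val() direction: Xen WC (PAT only) -> Linux WC (PWT) */
unsigned long xen_wc_to_linux(unsigned long pteval)
{
        if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT)
                pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
        return pteval;
}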
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 041d4fe9dfe..501d4e0244b 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -409,6 +409,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
         play_dead_common();
         HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
         cpu_bringup();
+        /*
+         * Balance out the preempt calls - as we are running in the cpu_idle
+         * loop which was entered at bootup from cpu_bringup_and_idle.
+         * cpu_bringup_and_idle called cpu_bringup, which did a
+         * preempt_disable(), so this preempt_enable() balances it out.
+         */
+        preempt_enable();
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
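The comment in the smp.c hunk describes a counting invariant: every
preempt_disable() must be matched by a preempt_enable() before the CPU
re-enters the idle loop. Modeled with a plain counter; the _m names are
illustrative stand-ins, not the kernel API:

#include <assert.h>

static int preempt_count_m;     /* 0 == preemptible */

static void preempt_disable_m(void) { preempt_count_m++; }
static void preempt_enable_m(void)  { preempt_count_m--; }

static void cpu_bringup_m(void)     { preempt_disable_m(); }

/* the xen_play_dead() resume path, as balanced by this hunk: */
int main(void)
{
        cpu_bringup_m();        /* re-runs the bringup preempt_disable() */
        preempt_enable_m();     /* the call this hunk adds */
        assert(preempt_count_m == 0);   /* balanced again */
        return 0;
}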