author		Ingo Molnar <mingo@elte.hu>	2012-03-13 11:32:54 -0400
committer	Ingo Molnar <mingo@elte.hu>	2012-03-13 11:33:03 -0400
commit		ef15eda98217f5183f457e7a2de8b79555ef908b
tree		f8f22b48f7bb237c9aa6646175f3e17eeac4af0e /arch/x86
parent		5cb4ac3a583d4ee18c8682ab857e093c4a0d0895
parent		ef334a20d84f52407a8a2afd02ddeaecbef0ad3d
Merge branch 'x86/cleanups' into perf/uprobes
Merge reason: We want to merge a dependent patch.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
70 files changed, 1733 insertions, 843 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 481dbfcf14ed..d2a540f7d6cb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2168,9 +2168,9 @@ config IA32_EMULATION
 	depends on X86_64
 	select COMPAT_BINFMT_ELF
 	---help---
-	  Include code to run 32-bit programs under a 64-bit kernel. You should
-	  likely turn this on, unless you're 100% sure that you don't have any
-	  32-bit programs left.
+	  Include code to run legacy 32-bit programs under a
+	  64-bit kernel. You should likely turn this on, unless you're
+	  100% sure that you don't have any 32-bit programs left.
 
 config IA32_AOUT
 	tristate "IA32 a.out support"
@@ -2178,9 +2178,22 @@ config IA32_AOUT
 	---help---
 	  Support old a.out binaries in the 32bit emulation.
 
+config X86_X32
+	bool "x32 ABI for 64-bit mode (EXPERIMENTAL)"
+	depends on X86_64 && IA32_EMULATION && EXPERIMENTAL
+	---help---
+	  Include code to run binaries for the x32 native 32-bit ABI
+	  for 64-bit processors.  An x32 process gets access to the
+	  full 64-bit register file and wide data path while leaving
+	  pointers at 32 bits for smaller memory footprint.
+
+	  You will need a recent binutils (2.22 or later) with
+	  elf32_x86_64 support enabled to compile a kernel with this
+	  option set.
+
 config COMPAT
 	def_bool y
-	depends on IA32_EMULATION
+	depends on IA32_EMULATION || X86_X32
 
 config COMPAT_FOR_U64_ALIGNMENT
 	def_bool COMPAT
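The new help text describes the third ABI the kernel can now serve. As an aside, a user-space program can tell at build time which of the three ABIs it is being compiled for; a minimal sketch (not part of the patch — it relies only on the compiler's predefined macros, with x32 defining both __x86_64__ and __ILP32__):

#include <stdio.h>

int main(void)
{
#if defined(__x86_64__) && defined(__ILP32__)
	puts("x32: 64-bit registers, 32-bit pointers");
#elif defined(__x86_64__)
	puts("x86-64: 64-bit registers and pointers");
#elif defined(__i386__)
	puts("ia32: 32-bit registers and pointers");
#endif
	printf("sizeof(void *) = %zu, sizeof(long) = %zu\n",
	       sizeof(void *), sizeof(long));
	return 0;
}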
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 209ba1294592..968dbe24a255 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -82,6 +82,22 @@ ifdef CONFIG_CC_STACKPROTECTOR
         endif
 endif
 
+ifdef CONFIG_X86_X32
+	x32_ld_ok := $(call try-run,\
+			/bin/echo -e '1: .quad 1b' | \
+			$(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" - && \
+			$(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \
+			$(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n)
+	ifeq ($(x32_ld_ok),y)
+		CONFIG_X86_X32_ABI := y
+		KBUILD_AFLAGS += -DCONFIG_X86_X32_ABI
+		KBUILD_CFLAGS += -DCONFIG_X86_X32_ABI
+	else
+		$(warning CONFIG_X86_X32 enabled but no binutils support)
+	endif
+endif
+export CONFIG_X86_X32_ABI
+
 # Don't unroll struct assignments with kmemcheck enabled
 ifeq ($(CONFIG_KMEMCHECK),y)
 	KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 65577698cab2..45b4fdd4e1da 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -12,10 +12,8 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/kernel.h>
-#include <linux/signal.h>
 #include <linux/errno.h>
 #include <linux/wait.h>
-#include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/personality.h>
@@ -31,20 +29,15 @@
 #include <asm/proto.h>
 #include <asm/vdso.h>
 #include <asm/sigframe.h>
+#include <asm/sighandling.h>
 #include <asm/sys_ia32.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-#define FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
-			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
-			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
-			 X86_EFLAGS_CF)
-
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+#define FIX_EFLAGS	__FIX_EFLAGS
 
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
 	int err = 0;
+	bool ia32 = is_ia32_task();
 
 	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
 		return -EFAULT;
@@ -74,8 +67,13 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 		case __SI_FAULT >> 16:
 			break;
 		case __SI_CHLD >> 16:
-			put_user_ex(from->si_utime, &to->si_utime);
-			put_user_ex(from->si_stime, &to->si_stime);
+			if (ia32) {
+				put_user_ex(from->si_utime, &to->si_utime);
+				put_user_ex(from->si_stime, &to->si_stime);
+			} else {
+				put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime);
+				put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime);
+			}
 			put_user_ex(from->si_status, &to->si_status);
 			/* FALL THROUGH */
 		default:
@@ -347,7 +345,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 		put_user_ex(regs->dx, &sc->dx);
 		put_user_ex(regs->cx, &sc->cx);
 		put_user_ex(regs->ax, &sc->ax);
-		put_user_ex(current->thread.trap_no, &sc->trapno);
+		put_user_ex(current->thread.trap_nr, &sc->trapno);
 		put_user_ex(current->thread.error_code, &sc->err);
 		put_user_ex(regs->ip, &sc->ip);
 		put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index f6f5c53dc903..aec2202a596c 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -287,46 +287,6 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
 	return ret;
 }
 
-asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
-				     compat_sigset_t __user *oset,
-				     unsigned int sigsetsize)
-{
-	sigset_t s;
-	compat_sigset_t s32;
-	int ret;
-	mm_segment_t old_fs = get_fs();
-
-	if (set) {
-		if (copy_from_user(&s32, set, sizeof(compat_sigset_t)))
-			return -EFAULT;
-		switch (_NSIG_WORDS) {
-		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
-		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
-		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
-		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
-		}
-	}
-	set_fs(KERNEL_DS);
-	ret = sys_rt_sigprocmask(how,
-				 set ? (sigset_t __user *)&s : NULL,
-				 oset ? (sigset_t __user *)&s : NULL,
-				 sigsetsize);
-	set_fs(old_fs);
-	if (ret)
-		return ret;
-	if (oset) {
-		switch (_NSIG_WORDS) {
-		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
-		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
-		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
-		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
-		}
-		if (copy_to_user(oset, &s32, sizeof(compat_sigset_t)))
-			return -EFAULT;
-	}
-	return 0;
-}
-
 asmlinkage long sys32_alarm(unsigned int seconds)
 {
 	return alarm_setitimer(seconds);
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index b57e6a43a37a..f9c0d3ba9e84 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -14,6 +14,7 @@ header-y += msr.h
 header-y += mtrr.h
 header-y += posix_types_32.h
 header-y += posix_types_64.h
+header-y += posix_types_x32.h
 header-y += prctl.h
 header-y += processor-flags.h
 header-y += ptrace-abi.h
@@ -24,3 +25,4 @@ header-y += vsyscall.h
 
 genhdr-y += unistd_32.h
 genhdr-y += unistd_64.h
+genhdr-y += unistd_x32.h
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 30d737ef2a42..d6805798d6fc 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,7 +6,9 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <asm/processor.h>
 #include <asm/user32.h>
+#include <asm/unistd.h>
 
 #define COMPAT_USER_HZ		100
 #define COMPAT_UTS_MACHINE	"i686\0\0"
@@ -186,7 +188,20 @@ struct compat_shmid64_ds {
 /*
  * The type of struct elf_prstatus.pr_reg in compatible core dumps.
  */
+#ifdef CONFIG_X86_X32_ABI
+typedef struct user_regs_struct compat_elf_gregset_t;
+
+#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
+#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
+#define SET_PR_FPVALID(S,V) \
+  do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
+  while (0)
+
+#define COMPAT_USE_64BIT_TIME \
+	(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
+#else
 typedef struct user_regs_struct32 compat_elf_gregset_t;
+#endif
 
 /*
  * A pointer passed in from user mode. This should not
@@ -208,13 +223,30 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
-	struct pt_regs *regs = task_pt_regs(current);
-	return (void __user *)regs->sp - len;
+	compat_uptr_t sp;
+
+	if (test_thread_flag(TIF_IA32)) {
+		sp = task_pt_regs(current)->sp;
+	} else {
+		/* -128 for the x32 ABI redzone */
+		sp = percpu_read(old_rsp) - 128;
+	}
+
+	return (void __user *)round_down(sp - len, 16);
+}
+
+static inline bool is_x32_task(void)
+{
+#ifdef CONFIG_X86_X32_ABI
+	if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
+		return true;
+#endif
+	return false;
 }
 
-static inline int is_compat_task(void)
+static inline bool is_compat_task(void)
 {
-	return current_thread_info()->status & TS_COMPAT;
+	return is_ia32_task() || is_x32_task();
 }
 
 #endif /* _ASM_X86_COMPAT_H */
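Note that is_x32_task() keys entirely off __X32_SYSCALL_BIT in orig_ax: the syscall number itself carries the ABI, per system call rather than per process. A hedged user-space illustration of that convention (assumes __X32_SYSCALL_BIT is 0x40000000 as in the asm/unistd.h of this series; it only succeeds on a kernel built with CONFIG_X86_X32=y, and fails with ENOSYS elsewhere):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __X32_SYSCALL_BIT
#define __X32_SYSCALL_BIT 0x40000000	/* assumption: matches asm/unistd.h */
#endif

int main(void)
{
	/* SYS_getpid is the native x86-64 number; x32 reuses it with
	   bit 30 set, which is exactly what is_x32_task() tests for. */
	long ret = syscall(__X32_SYSCALL_BIT | SYS_getpid);

	printf("getpid via x32 syscall number: %ld\n", ret);
	return 0;
}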
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5f962df30d0f..1e40634591a4 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -156,7 +156,12 @@ do {						\
 #define elf_check_arch(x)			\
 	((x)->e_machine == EM_X86_64)
 
-#define compat_elf_check_arch(x)	elf_check_arch_ia32(x)
+#define compat_elf_check_arch(x)					\
+	(elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
+
+#if __USER32_DS != __USER_DS
+# error "The following code assumes __USER32_DS == __USER_DS"
+#endif
 
 static inline void elf_common_init(struct thread_struct *t,
 				   struct pt_regs *regs, const u16 ds)
@@ -179,8 +184,9 @@ static inline void elf_common_init(struct thread_struct *t,
 void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
 #define compat_start_thread start_thread_ia32
 
-void set_personality_ia32(void);
-#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
+void set_personality_ia32(bool);
+#define COMPAT_SET_PERSONALITY(ex)			\
+	set_personality_ia32((ex).e_machine == EM_X86_64)
 
 #define COMPAT_ELF_PLATFORM			("i686")
 
@@ -287,7 +293,7 @@ do {						\
 #define VDSO_HIGH_BASE		0xffffe000U /* CONFIG_COMPAT_VDSO address */
 
 /* 1GB for 64bit, 8MB for 32bit */
-#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
+#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
 
 #define ARCH_DLINFO							\
 do {									\
@@ -296,9 +302,20 @@ do {									\
 		    (unsigned long)current->mm->context.vdso);		\
 } while (0)
 
+#define ARCH_DLINFO_X32							\
+do {									\
+	if (vdso_enabled)						\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
+			    (unsigned long)current->mm->context.vdso);	\
+} while (0)
+
 #define AT_SYSINFO		32
 
-#define COMPAT_ARCH_DLINFO	ARCH_DLINFO_IA32(sysctl_vsyscall32)
+#define COMPAT_ARCH_DLINFO						\
+if (test_thread_flag(TIF_X32))						\
+	ARCH_DLINFO_X32;						\
+else									\
+	ARCH_DLINFO_IA32(sysctl_vsyscall32)
 
 #define COMPAT_ELF_ET_DYN_BASE	(TASK_UNMAPPED_BASE + 0x1000000)
 
@@ -314,6 +331,8 @@ struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);
+extern int x32_setup_additional_pages(struct linux_binprm *bprm,
+				      int uses_interp);
 
 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
 #define compat_arch_setup_additional_pages syscall32_setup_pages
@@ -330,7 +349,7 @@ static inline int mmap_is_ia32(void)
 	return 1;
 #endif
 #ifdef CONFIG_IA32_EMULATION
-	if (test_thread_flag(TIF_IA32))
+	if (test_thread_flag(TIF_ADDR32))
 		return 1;
 #endif
 	return 0;
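The widened compat_elf_check_arch() is what lets the compat loader accept x32 executables: on disk they are ELFCLASS32 objects carrying the 64-bit machine type EM_X86_64. A small sketch of classifying a binary by those two header fields (illustrative only; a robust tool would also verify the ELF magic — e_ident and e_machine sit at the same offsets for both ELF classes, so reading an Elf32_Ehdr is enough):

#include <elf.h>
#include <stdio.h>

static const char *classify(const Elf32_Ehdr *eh)
{
	if (eh->e_ident[EI_CLASS] == ELFCLASS32 && eh->e_machine == EM_X86_64)
		return "x32";
	if (eh->e_ident[EI_CLASS] == ELFCLASS32 && eh->e_machine == EM_386)
		return "ia32";
	if (eh->e_ident[EI_CLASS] == ELFCLASS64 && eh->e_machine == EM_X86_64)
		return "x86-64";
	return "other";
}

int main(int argc, char **argv)
{
	Elf32_Ehdr eh;
	FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

	if (!f || fread(&eh, sizeof(eh), 1, f) != 1) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}
	printf("%s: %s\n", argv[1], classify(&eh));
	fclose(f);
	return 0;
}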
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 6919e936345b..247904945d3f 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,10 +29,11 @@ extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
-extern asmlinkage void math_state_restore(void);
-extern void __math_state_restore(void);
+extern void math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
+DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
+
 extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
 				xstateregs_get;
@@ -212,19 +213,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
 
 #endif	/* CONFIG_X86_64 */
 
-/* We need a safe address that is cheap to find and that is already
-   in L1 during context switch. The best choices are unfortunately
-   different for UP and SMP */
-#ifdef CONFIG_SMP
-#define safe_address (__per_cpu_offset[0])
-#else
-#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
-#endif
-
 /*
- * These must be called with preempt disabled
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
  */
-static inline void fpu_save_init(struct fpu *fpu)
+static inline int fpu_save_init(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		fpu_xsave(fpu);
@@ -233,33 +226,33 @@ static inline void fpu_save_init(struct fpu *fpu)
 		 * xsave header may indicate the init state of the FP.
 		 */
 		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
-			return;
+			return 1;
 	} else if (use_fxsr()) {
 		fpu_fxsave(fpu);
 	} else {
 		asm volatile("fnsave %[fx]; fwait"
 			     : [fx] "=m" (fpu->state->fsave));
-		return;
+		return 0;
 	}
 
-	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+	/*
+	 * If exceptions are pending, we need to clear them so
+	 * that we don't randomly get exceptions later.
+	 *
+	 * FIXME! Is this perhaps only true for the old-style
+	 * irq13 case? Maybe we could leave the x87 state
+	 * intact otherwise?
+	 */
+	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
 		asm volatile("fnclex");
-
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending.  Clear the x87 state here by setting it to fixed
-	   values. safe_address is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (safe_address));
+		return 0;
+	}
+	return 1;
 }
 
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline int __save_init_fpu(struct task_struct *tsk)
 {
-	fpu_save_init(&tsk->thread.fpu);
-	task_thread_info(tsk)->status &= ~TS_USEDFPU;
+	return fpu_save_init(&tsk->thread.fpu);
 }
 
 static inline int fpu_fxrstor_checking(struct fpu *fpu)
@@ -277,44 +270,212 @@ static inline int fpu_restore_checking(struct fpu *fpu)
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
 {
+	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+	   is pending. Clear the x87 state here by setting it to fixed
+	   values. "m" is a random variable that should be in L1 */
+	alternative_input(
+		ASM_NOP8 ASM_NOP2,
+		"emms\n\t"		/* clear stack tags */
+		"fildl %P[addr]",	/* set F?P to defined value */
+		X86_FEATURE_FXSAVE_LEAK,
+		[addr] "m" (tsk->thread.fpu.has_fpu));
+
 	return fpu_restore_checking(&tsk->thread.fpu);
 }
 
 /*
- * Signal frame handlers...
+ * Software FPU state helpers. Careful: these need to
+ * be preemption protected *and* they need to be
+ * properly paired with the CR0.TS changes!
  */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
+static inline int __thread_has_fpu(struct task_struct *tsk)
+{
+	return tsk->thread.fpu.has_fpu;
+}
 
-static inline void __unlazy_fpu(struct task_struct *tsk)
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		__save_init_fpu(tsk);
-		stts();
-	} else
-		tsk->fpu_counter = 0;
+	tsk->thread.fpu.has_fpu = 0;
+	percpu_write(fpu_owner_task, NULL);
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
+{
+	tsk->thread.fpu.has_fpu = 1;
+	percpu_write(fpu_owner_task, tsk);
+}
+
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct task_struct *tsk)
+{
+	__thread_clear_has_fpu(tsk);
+	stts();
+}
+
+static inline void __thread_fpu_begin(struct task_struct *tsk)
+{
+	clts();
+	__thread_set_has_fpu(tsk);
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ *  - switch_fpu_prepare() saves the old state and
+ *    sets the new state of the CR0.TS bit. This is
+ *    done within the context of the old process.
+ *
+ *  - switch_fpu_finish() restores the new state as
+ *    necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+/*
+ * FIXME! We could do a totally lazy restore, but we need to
+ * add a per-cpu "this was the task that last touched the FPU
+ * on this CPU" variable, and the task needs to have a "I last
+ * touched the FPU on this CPU" and check them.
+ *
+ * We don't do that yet, so "fpu_lazy_restore()" always returns
+ * false, but some day..
+ */
+static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+{
+	return new == percpu_read_stable(fpu_owner_task) &&
+		cpu == new->thread.fpu.last_cpu;
+}
+
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+{
+	fpu_switch_t fpu;
+
+	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	if (__thread_has_fpu(old)) {
+		if (!__save_init_fpu(old))
+			cpu = ~0;
+		old->thread.fpu.last_cpu = cpu;
+		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */
+
+		/* Don't change CR0.TS if we just switch! */
+		if (fpu.preload) {
+			new->fpu_counter++;
+			__thread_set_has_fpu(new);
+			prefetch(new->thread.fpu.state);
+		} else
+			stts();
+	} else {
+		old->fpu_counter = 0;
+		old->thread.fpu.last_cpu = ~0;
+		if (fpu.preload) {
+			new->fpu_counter++;
+			if (fpu_lazy_restore(new, cpu))
+				fpu.preload = 0;
+			else
+				prefetch(new->thread.fpu.state);
+			__thread_fpu_begin(new);
+		}
+	}
+	return fpu;
+}
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+{
+	if (fpu.preload) {
+		if (unlikely(restore_fpu_checking(new)))
+			__thread_fpu_end(new);
+	}
 }
 
+/*
+ * Signal frame handlers...
+ */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);
+
 static inline void __clear_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+	if (__thread_has_fpu(tsk)) {
 		/* Ignore delayed exceptions from user space */
 		asm volatile("1: fwait\n"
 			     "2:\n"
 			     _ASM_EXTABLE(1b, 2b));
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
+		__thread_fpu_end(tsk);
 	}
 }
 
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: the thread must not have fpu (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+	return !__thread_has_fpu(current) &&
+		(read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+	struct pt_regs *regs = get_irq_regs();
+	return regs && user_mode_vm(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+static inline bool irq_fpu_usable(void)
+{
+	return !in_interrupt() ||
+		interrupted_user_mode() ||
+		interrupted_kernel_fpu_idle();
+}
+
 static inline void kernel_fpu_begin(void)
 {
-	struct thread_info *me = current_thread_info();
+	struct task_struct *me = current;
+
+	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
-	if (me->status & TS_USEDFPU)
-		__save_init_fpu(me->task);
-	else
+	if (__thread_has_fpu(me)) {
+		__save_init_fpu(me);
+		__thread_clear_has_fpu(me);
+		/* We do 'stts()' in kernel_fpu_end() */
+	} else {
+		percpu_write(fpu_owner_task, NULL);
 		clts();
+	}
 }
 
 static inline void kernel_fpu_end(void)
@@ -323,14 +484,6 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
-static inline bool irq_fpu_usable(void)
-{
-	struct pt_regs *regs;
-
-	return !in_interrupt() || !(regs = get_irq_regs()) || \
-		user_mode(regs) || (read_cr0() & X86_CR0_TS);
-}
-
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions
@@ -363,20 +516,64 @@ static inline void irq_ts_restore(int TS_state)
 }
 
 /*
+ * The question "does this thread have fpu access?"
+ * is slightly racy, since preemption could come in
+ * and revoke it immediately after the test.
+ *
+ * However, even in that very unlikely scenario,
+ * we can just assume we have FPU access - typically
+ * to save the FP state - we'll just take a #NM
+ * fault and get the FPU access back.
+ *
+ * The actual user_fpu_begin/end() functions
+ * need to be preemption-safe, though.
+ *
+ * NOTE! user_fpu_end() must be used only after you
+ * have saved the FP state, and user_fpu_begin() must
+ * be used only immediately before restoring it.
+ * These functions do not do any save/restore on
+ * their own.
+ */
+static inline int user_has_fpu(void)
+{
+	return __thread_has_fpu(current);
+}
+
+static inline void user_fpu_end(void)
+{
+	preempt_disable();
+	__thread_fpu_end(current);
+	preempt_enable();
+}
+
+static inline void user_fpu_begin(void)
+{
+	preempt_disable();
+	if (!user_has_fpu())
+		__thread_fpu_begin(current);
+	preempt_enable();
+}
+
+/*
  * These disable preemption on their own and are safe
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
+	WARN_ON_ONCE(!__thread_has_fpu(tsk));
 	preempt_disable();
 	__save_init_fpu(tsk);
-	stts();
+	__thread_fpu_end(tsk);
 	preempt_enable();
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk)
 {
 	preempt_disable();
-	__unlazy_fpu(tsk);
+	if (__thread_has_fpu(tsk)) {
+		__save_init_fpu(tsk);
+		__thread_fpu_end(tsk);
+	} else
+		tsk->fpu_counter = 0;
 	preempt_enable();
 }
 
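The two-stage switch_fpu_prepare()/switch_fpu_finish() protocol described above is easiest to see at its intended call site. A simplified sketch of how the scheduler's __switch_to() pairs the two halves (after the corresponding arch/x86/kernel/process_64.c change in this merge — kernel context, so this illustrates the call pattern rather than being a standalone program, and the real function does stack, TLS and segment switching between the two stages):

struct task_struct *__switch_to(struct task_struct *prev_p,
				struct task_struct *next_p)
{
	int cpu = smp_processor_id();
	fpu_switch_t fpu;

	/* Stage 1: still in prev_p's context - save its FPU state and
	   decide (fpu.preload) whether next_p's state gets preloaded. */
	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/* ... the actual context switch happens here ... */

	/* Stage 2: running as next_p - conditionally restore registers. */
	switch_fpu_finish(next_p, fpu);

	return prev_p;
}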
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 1f7e62517284..7d0c18587709 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -43,6 +43,15 @@ struct ucontext_ia32 {
 	compat_sigset_t	  uc_sigmask;	/* mask last for extensibility */
 };
 
+struct ucontext_x32 {
+	unsigned int	  uc_flags;
+	unsigned int 	  uc_link;
+	stack_ia32_t	  uc_stack;
+	unsigned int	  uc__pad0;     /* needed for alignment */
+	struct sigcontext uc_mcontext;  /* the 64-bit sigcontext type */
+	compat_sigset_t	  uc_sigmask;	/* mask last for extensibility */
+};
+
 /* This matches struct stat64 in glibc2.2, hence the absolutely
  * insane amounts of padding around dev_t's.
  */
@@ -116,6 +125,15 @@ typedef struct compat_siginfo {
 			compat_clock_t _stime;
 		} _sigchld;
 
+		/* SIGCHLD (x32 version) */
+		struct {
+			unsigned int _pid;	/* which child */
+			unsigned int _uid;	/* sender's uid */
+			int _status;		/* exit code */
+			s64 _utime;
+			s64 _stime;
+		} _sigchld_x32;
+
 		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
 		struct {
 			unsigned int _addr;	/* faulting insn/memory ref. */
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index ab4092e3214e..7b9cfc4878af 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -190,6 +190,9 @@ struct x86_emulate_ops {
 	int (*intercept)(struct x86_emulate_ctxt *ctxt,
 			 struct x86_instruction_info *info,
 			 enum x86_intercept_stage stage);
+
+	bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
+			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -298,6 +301,19 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_PROT     (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
 			       X86EMUL_MODE_PROT64)
 
+/* CPUID vendors */
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
+
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
+
 enum x86_intercept_stage {
 	X86_ICTP_NONE = 0,	/* Allow zero-init to not match anything */
 	X86_ICPT_PRE_EXCEPT,
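These vendor constants are just the 12-byte CPUID vendor strings chopped into three little-endian 32-bit words (CPUID leaf 0 returns the string in EBX, EDX, ECX order). A small user-space self-check of that packing (little-endian host assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char id[12] = {'A','u','t','h','e','n','t','i','c','A','M','D'};
	uint32_t ebx, edx, ecx;

	memcpy(&ebx, id + 0, 4);	/* "Auth" -> 0x68747541 */
	memcpy(&edx, id + 4, 4);	/* "enti" -> 0x69746e65 */
	memcpy(&ecx, id + 8, 4);	/* "cAMD" -> 0x444d4163 */

	/* Matches X86EMUL_CPUID_VENDOR_AuthenticAMD_{ebx,edx,ecx} above. */
	printf("ebx=%#010x edx=%#010x ecx=%#010x\n", ebx, edx, ecx);
	return 0;
}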
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 4365ffdb461f..7e3f17f92c66 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -29,18 +29,18 @@
 
 #define	MTRR_IOCTL_BASE	'M'
 
-struct mtrr_sentry {
-	unsigned long base;	/* Base address */
-	unsigned int size;	/* Size of region */
-	unsigned int type;	/* Type of region */
-};
-
 /* Warning: this structure has a different order from i386
    on x86-64. The 32bit emulation code takes care of that.
    But you need to use this for 64bit, otherwise your X server
    will break. */
 
 #ifdef __i386__
+struct mtrr_sentry {
+	unsigned long base;	/* Base address */
+	unsigned int size;	/* Size of region */
+	unsigned int type;	/* Type of region */
+};
+
 struct mtrr_gentry {
 	unsigned int regnum;	/* Register number */
 	unsigned long base;	/* Base address */
@@ -50,12 +50,20 @@ struct mtrr_gentry {
 
 #else  /* __i386__ */
 
+struct mtrr_sentry {
+	__u64 base;		/* Base address */
+	__u32 size;		/* Size of region */
+	__u32 type;		/* Type of region */
+};
+
 struct mtrr_gentry {
-	unsigned long base;	/* Base address */
-	unsigned int size;	/* Size of region */
-	unsigned int regnum;	/* Register number */
-	unsigned int type;	/* Type of region */
+	__u64 base;		/* Base address */
+	__u32 size;		/* Size of region */
+	__u32 regnum;		/* Register number */
+	__u32 type;		/* Type of region */
+	__u32 _pad;		/* Unused */
 };
+
 #endif /* !__i386__ */
 
 struct mtrr_var_range {
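The point of the fixed-width members and the explicit _pad is that the ioctl structure now has one unambiguous layout for every 64-bit ABI, instead of varying with sizeof(unsigned long) (which is 4 under x32). A compile-time sketch of that invariant, mirroring the new struct with <stdint.h> types (not the kernel's own header):

#include <stdint.h>

struct mtrr_gentry_sketch {
	uint64_t base;		/* Base address */
	uint32_t size;		/* Size of region */
	uint32_t regnum;	/* Register number */
	uint32_t type;		/* Type of region */
	uint32_t _pad;		/* makes the tail padding explicit */
};

int main(void)
{
	/* 24 bytes whether built with -m64 or -mx32. */
	_Static_assert(sizeof(struct mtrr_gentry_sketch) == 24, "fixed layout");
	return 0;
}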
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 9b922c136254..e8fb2c7a5f4f 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -240,4 +240,12 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+ extern void amd_pmu_enable_virt(void);
+ extern void amd_pmu_disable_virt(void);
+#else
+ static inline void amd_pmu_enable_virt(void) { }
+ static inline void amd_pmu_disable_virt(void) { }
+#endif
+
 #endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index bb7133dc155d..3427b7798dbc 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -7,7 +7,9 @@
 #else
 # ifdef __i386__
 #  include "posix_types_32.h"
-# else
+# elif defined(__LP64__)
 #  include "posix_types_64.h"
+# else
+#  include "posix_types_x32.h"
 # endif
 #endif
diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h
index f7d9adf82e53..99f262e04b91 100644
--- a/arch/x86/include/asm/posix_types_32.h
+++ b/arch/x86/include/asm/posix_types_32.h
@@ -7,79 +7,22 @@
  * assume GCC is being used.
  */
 
-typedef unsigned long	__kernel_ino_t;
 typedef unsigned short	__kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
 typedef unsigned short	__kernel_nlink_t;
-typedef long		__kernel_off_t;
-typedef int		__kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
 typedef unsigned short	__kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
 typedef unsigned short	__kernel_uid_t;
 typedef unsigned short	__kernel_gid_t;
-typedef unsigned int	__kernel_size_t;
-typedef int		__kernel_ssize_t;
-typedef int		__kernel_ptrdiff_t;
-typedef long		__kernel_time_t;
-typedef long		__kernel_suseconds_t;
-typedef long		__kernel_clock_t;
-typedef int		__kernel_timer_t;
-typedef int		__kernel_clockid_t;
-typedef int		__kernel_daddr_t;
-typedef char *		__kernel_caddr_t;
-typedef unsigned short	__kernel_uid16_t;
-typedef unsigned short	__kernel_gid16_t;
-typedef unsigned int	__kernel_uid32_t;
-typedef unsigned int	__kernel_gid32_t;
+#define __kernel_uid_t __kernel_uid_t
 
-typedef unsigned short	__kernel_old_uid_t;
-typedef unsigned short	__kernel_old_gid_t;
 typedef unsigned short	__kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
 
-#ifdef __GNUC__
-typedef long long	__kernel_loff_t;
-#endif
-
-typedef struct {
-	int	val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef	__FD_SET
-#define __FD_SET(fd,fdsetp)					\
-	asm volatile("btsl %1,%0":				\
-		     "+m" (*(__kernel_fd_set *)(fdsetp))	\
-		     : "r" ((int)(fd)))
-
-#undef	__FD_CLR
-#define __FD_CLR(fd,fdsetp)					\
-	asm volatile("btrl %1,%0":				\
-		     "+m" (*(__kernel_fd_set *)(fdsetp))	\
-		     : "r" ((int) (fd)))
-
-#undef	__FD_ISSET
-#define __FD_ISSET(fd,fdsetp)					\
-	(__extension__						\
-	 ({							\
-	 unsigned char __result;				\
-	 asm volatile("btl %1,%2 ; setb %0"			\
-		      : "=q" (__result)				\
-		      : "r" ((int)(fd)),			\
-			"m" (*(__kernel_fd_set *)(fdsetp)));	\
-	 __result;						\
-}))
-
-#undef	__FD_ZERO
-#define __FD_ZERO(fdsetp)					\
-do {								\
-	int __d0, __d1;						\
-	asm volatile("cld ; rep ; stosl"			\
-		     : "=m" (*(__kernel_fd_set *)(fdsetp)),	\
-		       "=&c" (__d0), "=&D" (__d1)		\
-		     : "a" (0), "1" (__FDSET_LONGS),		\
-		       "2" ((__kernel_fd_set *)(fdsetp))	\
-		     : "memory");				\
-} while (0)
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
 
 #endif /* _ASM_X86_POSIX_TYPES_32_H */
diff --git a/arch/x86/include/asm/posix_types_64.h b/arch/x86/include/asm/posix_types_64.h
index eb8d2d92b63e..cba0c1ead162 100644
--- a/arch/x86/include/asm/posix_types_64.h
+++ b/arch/x86/include/asm/posix_types_64.h
@@ -7,113 +7,13 @@
  * assume GCC is being used.
  */
 
-typedef unsigned long	__kernel_ino_t;
-typedef unsigned int	__kernel_mode_t;
-typedef unsigned long	__kernel_nlink_t;
-typedef long		__kernel_off_t;
-typedef int		__kernel_pid_t;
-typedef int		__kernel_ipc_pid_t;
-typedef unsigned int	__kernel_uid_t;
-typedef unsigned int	__kernel_gid_t;
-typedef unsigned long	__kernel_size_t;
-typedef long		__kernel_ssize_t;
-typedef long		__kernel_ptrdiff_t;
-typedef long		__kernel_time_t;
-typedef long		__kernel_suseconds_t;
-typedef long		__kernel_clock_t;
-typedef int		__kernel_timer_t;
-typedef int		__kernel_clockid_t;
-typedef int		__kernel_daddr_t;
-typedef char *		__kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-
-#ifdef __GNUC__
-typedef long long	__kernel_loff_t;
-#endif
-
-typedef struct {
-	int	val[2];
-} __kernel_fsid_t;
-
 typedef unsigned short __kernel_old_uid_t;
 typedef unsigned short __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
 
 typedef unsigned long	__kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
 
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant cases (8 or 32 longs,
- * for 256 and 1024-bit fd_sets respectively)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *p)
-{
-	unsigned long *tmp = p->fds_bits;
-	int i;
-
-	if (__builtin_constant_p(__FDSET_LONGS)) {
-		switch (__FDSET_LONGS) {
-		case 32:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
-			tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
-			tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
-			tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
-			return;
-		case 16:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			return;
-		case 8:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			return;
-		case 4:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			return;
-		}
-	}
-	i = __FDSET_LONGS;
-	while (i) {
-		i--;
-		*tmp = 0;
-		tmp++;
-	}
-}
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
 
 #endif /* _ASM_X86_POSIX_TYPES_64_H */
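Both posix_types files now rely on the asm-generic override idiom: the arch header defines a type, then defines a macro with the same name, and asm-generic/posix_types.h only supplies its default when that macro is absent. A minimal self-contained sketch of the pattern (the hypothetical arch_mode_t stands in for the __kernel_* types):

/* arch header fragment: */
typedef unsigned short arch_mode_t;
#define arch_mode_t arch_mode_t

/* asm-generic fragment: */
#ifndef arch_mode_t
typedef unsigned int arch_mode_t;	/* generic default, skipped here */
#endif

int main(void)
{
	/* With the override in place, the arch definition wins. */
	_Static_assert(sizeof(arch_mode_t) == sizeof(unsigned short),
		       "arch override takes precedence");
	return 0;
}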
diff --git a/arch/x86/include/asm/posix_types_x32.h b/arch/x86/include/asm/posix_types_x32.h
new file mode 100644
index 000000000000..85f9bdafa93c
--- /dev/null
+++ b/arch/x86/include/asm/posix_types_x32.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_X86_POSIX_TYPES_X32_H
+#define _ASM_X86_POSIX_TYPES_X32_H
+
+/*
+ * This file is only used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ *
+ * These types should generally match the ones used by the 64-bit kernel,
+ *
+ */
+
+typedef long long	__kernel_long_t;
+typedef unsigned long long __kernel_ulong_t;
+#define __kernel_long_t __kernel_long_t
+
+#include <asm/posix_types_64.h>
+
+#endif /* _ASM_X86_POSIX_TYPES_X32_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index aa9088c26931..f6d0d2eb0832 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -374,6 +374,8 @@ union thread_xstate {
 };
 
 struct fpu {
+	unsigned int last_cpu;
+	unsigned int has_fpu;
 	union thread_xstate *state;
 };
 
@@ -451,7 +453,7 @@ struct thread_struct {
 	unsigned long		ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
-	unsigned long		trap_no;
+	unsigned long		trap_nr;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
 	struct fpu		fpu;
@@ -924,9 +926,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
 					0xc0000000 : 0xFFFFe000)
 
-#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
+#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
+#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
 
 #define STACK_TOP		TASK_SIZE
@@ -948,6 +950,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);
+
+/*
+ * User space RSP while inside the SYSCALL fast path
+ */
+DECLARE_PER_CPU(unsigned long, old_rsp);
+
 #endif /* CONFIG_X86_64 */
 
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 35664547125b..dcfde52979c3 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -145,7 +145,6 @@ extern unsigned long
 convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 			 int error_code, int si_code);
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 
 extern long syscall_trace_enter(struct pt_regs *);
 extern void syscall_trace_leave(struct pt_regs *);
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index 04459d25e66e..4a085383af27 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h | |||
@@ -230,34 +230,37 @@ struct sigcontext { | |||
230 | * User-space might still rely on the old definition: | 230 | * User-space might still rely on the old definition: |
231 | */ | 231 | */ |
232 | struct sigcontext { | 232 | struct sigcontext { |
233 | unsigned long r8; | 233 | __u64 r8; |
234 | unsigned long r9; | 234 | __u64 r9; |
235 | unsigned long r10; | 235 | __u64 r10; |
236 | unsigned long r11; | 236 | __u64 r11; |
237 | unsigned long r12; | 237 | __u64 r12; |
238 | unsigned long r13; | 238 | __u64 r13; |
239 | unsigned long r14; | 239 | __u64 r14; |
240 | unsigned long r15; | 240 | __u64 r15; |
241 | unsigned long rdi; | 241 | __u64 rdi; |
242 | unsigned long rsi; | 242 | __u64 rsi; |
243 | unsigned long rbp; | 243 | __u64 rbp; |
244 | unsigned long rbx; | 244 | __u64 rbx; |
245 | unsigned long rdx; | 245 | __u64 rdx; |
246 | unsigned long rax; | 246 | __u64 rax; |
247 | unsigned long rcx; | 247 | __u64 rcx; |
248 | unsigned long rsp; | 248 | __u64 rsp; |
249 | unsigned long rip; | 249 | __u64 rip; |
250 | unsigned long eflags; /* RFLAGS */ | 250 | __u64 eflags; /* RFLAGS */ |
251 | unsigned short cs; | 251 | __u16 cs; |
252 | unsigned short gs; | 252 | __u16 gs; |
253 | unsigned short fs; | 253 | __u16 fs; |
254 | unsigned short __pad0; | 254 | __u16 __pad0; |
255 | unsigned long err; | 255 | __u64 err; |
256 | unsigned long trapno; | 256 | __u64 trapno; |
257 | unsigned long oldmask; | 257 | __u64 oldmask; |
258 | unsigned long cr2; | 258 | __u64 cr2; |
259 | struct _fpstate __user *fpstate; /* zero when no FPU context */ | 259 | struct _fpstate __user *fpstate; /* zero when no FPU context */ |
260 | unsigned long reserved1[8]; | 260 | #ifndef __LP64__ |
261 | __u32 __fpstate_pad; | ||
262 | #endif | ||
263 | __u64 reserved1[8]; | ||
261 | }; | 264 | }; |
262 | #endif /* !__KERNEL__ */ | 265 | #endif /* !__KERNEL__ */ |
263 | 266 | ||
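
The switch from unsigned long to __u64 in this user-visible structure is what keeps one layout across 64-bit and x32 userspace: under the ILP32 model unsigned long shrinks to 4 bytes, so the old definition would have described a different structure to an x32 program. The new __fpstate_pad widens the 32-bit fpstate pointer slot back to 8 bytes on non-LP64 builds. A minimal userspace sketch of the idea (not the kernel header; the field subset and assert value are illustrative, and it assumes a toolchain that accepts both -m64 and -mx32):

#include <stddef.h>
#include <stdint.h>

/* Fixed-width fields keep one layout across LP64 and x32 (ILP32). */
struct sigcontext_sketch {
	uint64_t rip;			/* 'unsigned long' would be 4 bytes on x32 */
	uint16_t cs, gs, fs, __pad0;
	void *fpstate;			/* 4 bytes on x32 ... */
#ifndef __LP64__
	uint32_t __fpstate_pad;		/* ... padded back to an 8-byte slot */
#endif
	uint64_t reserved1[8];
};

_Static_assert(offsetof(struct sigcontext_sketch, reserved1) == 24,
	       "same offsets whether compiled -m64 or -mx32");

int main(void) { return 0; }
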
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h index 4e0fe26d27d3..7c7c27c97daa 100644 --- a/arch/x86/include/asm/sigframe.h +++ b/arch/x86/include/asm/sigframe.h | |||
@@ -59,12 +59,25 @@ struct rt_sigframe_ia32 { | |||
59 | #endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */ | 59 | #endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */ |
60 | 60 | ||
61 | #ifdef CONFIG_X86_64 | 61 | #ifdef CONFIG_X86_64 |
62 | |||
62 | struct rt_sigframe { | 63 | struct rt_sigframe { |
63 | char __user *pretcode; | 64 | char __user *pretcode; |
64 | struct ucontext uc; | 65 | struct ucontext uc; |
65 | struct siginfo info; | 66 | struct siginfo info; |
66 | /* fp state follows here */ | 67 | /* fp state follows here */ |
67 | }; | 68 | }; |
69 | |||
70 | #ifdef CONFIG_X86_X32_ABI | ||
71 | |||
72 | struct rt_sigframe_x32 { | ||
73 | u64 pretcode; | ||
74 | struct ucontext_x32 uc; | ||
75 | compat_siginfo_t info; | ||
76 | /* fp state follows here */ | ||
77 | }; | ||
78 | |||
79 | #endif /* CONFIG_X86_X32_ABI */ | ||
80 | |||
68 | #endif /* CONFIG_X86_64 */ | 81 | #endif /* CONFIG_X86_64 */ |
69 | 82 | ||
70 | #endif /* _ASM_X86_SIGFRAME_H */ | 83 | #endif /* _ASM_X86_SIGFRAME_H */ |
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h new file mode 100644 index 000000000000..ada93b3b8c66 --- /dev/null +++ b/arch/x86/include/asm/sighandling.h | |||
@@ -0,0 +1,24 @@ | |||
1 | #ifndef _ASM_X86_SIGHANDLING_H | ||
2 | #define _ASM_X86_SIGHANDLING_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/ptrace.h> | ||
6 | #include <linux/signal.h> | ||
7 | |||
8 | #include <asm/processor-flags.h> | ||
9 | |||
10 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
11 | |||
12 | #define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ | ||
13 | X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ | ||
14 | X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ | ||
15 | X86_EFLAGS_CF) | ||
16 | |||
17 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); | ||
18 | |||
19 | int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | ||
20 | unsigned long *pax); | ||
21 | int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, | ||
22 | struct pt_regs *regs, unsigned long mask); | ||
23 | |||
24 | #endif /* _ASM_X86_SIGHANDLING_H */ | ||
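
This new header collects what used to be file-local to arch/x86/kernel/signal.c: the hunks further down un-static restore_sigcontext() and setup_sigcontext() and move _BLOCKABLE and __FIX_EFLAGS here, so the ia32 and x32 signal paths can share them instead of duplicating the logic. A hedged kernel-context sketch of a consumer; the function below is illustrative, not part of the patch:

#include <linux/errno.h>
#include <asm/sighandling.h>

/* Illustrative only: any signal path can now restore a saved context the
 * same way signal.c does. */
static int example_restore(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned long ax;

	if (restore_sigcontext(regs, sc, &ax))
		return -EFAULT;		/* bad user frame */
	regs->ax = ax;			/* hand back the saved return value */
	return 0;
}
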
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index cb238526a9f1..3fda9db48819 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #ifndef _ASM_X86_SYS_IA32_H | 10 | #ifndef _ASM_X86_SYS_IA32_H |
11 | #define _ASM_X86_SYS_IA32_H | 11 | #define _ASM_X86_SYS_IA32_H |
12 | 12 | ||
13 | #ifdef CONFIG_COMPAT | ||
14 | |||
13 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
14 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
15 | #include <linux/types.h> | 17 | #include <linux/types.h> |
@@ -36,8 +38,6 @@ asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *, | |||
36 | struct sigaction32 __user *, unsigned int); | 38 | struct sigaction32 __user *, unsigned int); |
37 | asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, | 39 | asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, |
38 | struct old_sigaction32 __user *); | 40 | struct old_sigaction32 __user *); |
39 | asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *, | ||
40 | compat_sigset_t __user *, unsigned int); | ||
41 | asmlinkage long sys32_alarm(unsigned int); | 41 | asmlinkage long sys32_alarm(unsigned int); |
42 | 42 | ||
43 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); | 43 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); |
@@ -83,4 +83,7 @@ asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32); | |||
83 | 83 | ||
84 | asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, | 84 | asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, |
85 | const char __user *); | 85 | const char __user *); |
86 | |||
87 | #endif /* CONFIG_COMPAT */ | ||
88 | |||
86 | #endif /* _ASM_X86_SYS_IA32_H */ | 89 | #endif /* _ASM_X86_SYS_IA32_H */ |
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index d962e5652a73..386b78686c4d 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <asm/asm-offsets.h> /* For NR_syscalls */ | 18 | #include <asm/asm-offsets.h> /* For NR_syscalls */ |
19 | #include <asm/unistd.h> | ||
19 | 20 | ||
20 | extern const unsigned long sys_call_table[]; | 21 | extern const unsigned long sys_call_table[]; |
21 | 22 | ||
@@ -26,13 +27,13 @@ extern const unsigned long sys_call_table[]; | |||
26 | */ | 27 | */ |
27 | static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) | 28 | static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) |
28 | { | 29 | { |
29 | return regs->orig_ax; | 30 | return regs->orig_ax & __SYSCALL_MASK; |
30 | } | 31 | } |
31 | 32 | ||
32 | static inline void syscall_rollback(struct task_struct *task, | 33 | static inline void syscall_rollback(struct task_struct *task, |
33 | struct pt_regs *regs) | 34 | struct pt_regs *regs) |
34 | { | 35 | { |
35 | regs->ax = regs->orig_ax; | 36 | regs->ax = regs->orig_ax & __SYSCALL_MASK; |
36 | } | 37 | } |
37 | 38 | ||
38 | static inline long syscall_get_error(struct task_struct *task, | 39 | static inline long syscall_get_error(struct task_struct *task, |
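
With x32, the raw orig_ax carries the flag bit, so anything that reports the syscall number to a tracer (or rolls it back) now masks it off first; otherwise ptrace and audit would see 0x40000027 instead of 39 for an x32 getpid. A userspace model of the arithmetic:

#include <stdio.h>

#define __X32_SYSCALL_BIT 0x40000000
#define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))

int main(void)
{
	long orig_ax = 39 | __X32_SYSCALL_BIT;	/* x32 getpid: common slot 39 plus flag */

	printf("raw %#lx -> nr %ld\n", orig_ax, orig_ax & __SYSCALL_MASK);
	return 0;
}
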
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index bc817cd8b443..ad6df8ccd715 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -86,7 +86,7 @@ struct thread_info { | |||
86 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 86 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
87 | #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ | 87 | #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ |
88 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | 88 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ |
89 | #define TIF_IA32 17 /* 32bit process */ | 89 | #define TIF_IA32 17 /* IA32 compatibility process */ |
90 | #define TIF_FORK 18 /* ret_from_fork */ | 90 | #define TIF_FORK 18 /* ret_from_fork */ |
91 | #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ | 91 | #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ |
92 | #define TIF_DEBUG 21 /* uses debug registers */ | 92 | #define TIF_DEBUG 21 /* uses debug registers */ |
@@ -95,6 +95,8 @@ struct thread_info { | |||
95 | #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ | 95 | #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ |
96 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ | 96 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ |
97 | #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ | 97 | #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ |
98 | #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ | ||
99 | #define TIF_X32 30 /* 32-bit native x86-64 binary */ | ||
98 | 100 | ||
99 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 101 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
100 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 102 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
@@ -116,6 +118,8 @@ struct thread_info { | |||
116 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) | 118 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) |
117 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) | 119 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) |
118 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) | 120 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
121 | #define _TIF_ADDR32 (1 << TIF_ADDR32) | ||
122 | #define _TIF_X32 (1 << TIF_X32) | ||
119 | 123 | ||
120 | /* work to do in syscall_trace_enter() */ | 124 | /* work to do in syscall_trace_enter() */ |
121 | #define _TIF_WORK_SYSCALL_ENTRY \ | 125 | #define _TIF_WORK_SYSCALL_ENTRY \ |
@@ -247,8 +251,6 @@ static inline struct thread_info *current_thread_info(void) | |||
247 | * ever touches our thread-synchronous status, so we don't | 251 | * ever touches our thread-synchronous status, so we don't |
248 | * have to worry about atomic accesses. | 252 | * have to worry about atomic accesses. |
249 | */ | 253 | */ |
250 | #define TS_USEDFPU 0x0001 /* FPU was used by this task | ||
251 | this quantum (SMP) */ | ||
252 | #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ | 254 | #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ |
253 | #define TS_POLLING 0x0004 /* idle task polling need_resched, | 255 | #define TS_POLLING 0x0004 /* idle task polling need_resched, |
254 | skip sending interrupt */ | 256 | skip sending interrupt */ |
@@ -264,6 +266,18 @@ static inline void set_restore_sigmask(void) | |||
264 | ti->status |= TS_RESTORE_SIGMASK; | 266 | ti->status |= TS_RESTORE_SIGMASK; |
265 | set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); | 267 | set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); |
266 | } | 268 | } |
269 | |||
270 | static inline bool is_ia32_task(void) | ||
271 | { | ||
272 | #ifdef CONFIG_X86_32 | ||
273 | return true; | ||
274 | #endif | ||
275 | #ifdef CONFIG_IA32_EMULATION | ||
276 | if (current_thread_info()->status & TS_COMPAT) | ||
277 | return true; | ||
278 | #endif | ||
279 | return false; | ||
280 | } | ||
267 | #endif /* !__ASSEMBLY__ */ | 281 | #endif /* !__ASSEMBLY__ */ |
268 | 282 | ||
269 | #ifndef __ASSEMBLY__ | 283 | #ifndef __ASSEMBLY__ |
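
Note that is_ia32_task() deliberately answers false for x32: an x32 task sets TIF_X32 and TIF_ADDR32 but not the persistent TS_COMPAT status, since its compat behaviour rides on the syscall flag bit instead (see the set_personality_ia32() hunk further down). A compact userspace model of the distinction, with field names mirroring the bits above and the encoding purely illustrative:

#include <stdbool.h>
#include <stdio.h>

struct task_sketch { bool tif_ia32, tif_x32, tif_addr32, ts_compat; };

static const struct task_sketch native = { false, false, false, false };
static const struct task_sketch ia32   = { true,  false, true,  true  };
static const struct task_sketch x32    = { false, true,  true,  false };

/* Mirrors is_ia32_task() on a 64-bit kernel; a CONFIG_X86_32 kernel
 * returns true unconditionally. */
static bool is_ia32_task_sketch(const struct task_sketch *t)
{
	return t->ts_compat;
}

int main(void)
{
	printf("ia32? native=%d ia32=%d x32=%d\n",
	       is_ia32_task_sketch(&native), is_ia32_task_sketch(&ia32),
	       is_ia32_task_sketch(&x32));
	return 0;
}
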
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 0012d0902c5f..88eae2aec619 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h | |||
@@ -89,4 +89,29 @@ asmlinkage void smp_thermal_interrupt(void); | |||
89 | asmlinkage void mce_threshold_interrupt(void); | 89 | asmlinkage void mce_threshold_interrupt(void); |
90 | #endif | 90 | #endif |
91 | 91 | ||
92 | /* Interrupts/Exceptions */ | ||
93 | enum { | ||
94 | X86_TRAP_DE = 0, /* 0, Divide-by-zero */ | ||
95 | X86_TRAP_DB, /* 1, Debug */ | ||
96 | X86_TRAP_NMI, /* 2, Non-maskable Interrupt */ | ||
97 | X86_TRAP_BP, /* 3, Breakpoint */ | ||
98 | X86_TRAP_OF, /* 4, Overflow */ | ||
99 | X86_TRAP_BR, /* 5, Bound Range Exceeded */ | ||
100 | X86_TRAP_UD, /* 6, Invalid Opcode */ | ||
101 | X86_TRAP_NM, /* 7, Device Not Available */ | ||
102 | X86_TRAP_DF, /* 8, Double Fault */ | ||
103 | X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */ | ||
104 | X86_TRAP_TS, /* 10, Invalid TSS */ | ||
105 | X86_TRAP_NP, /* 11, Segment Not Present */ | ||
106 | X86_TRAP_SS, /* 12, Stack Segment Fault */ | ||
107 | X86_TRAP_GP, /* 13, General Protection Fault */ | ||
108 | X86_TRAP_PF, /* 14, Page Fault */ | ||
109 | X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */ | ||
110 | X86_TRAP_MF, /* 16, x87 Floating-Point Exception */ | ||
111 | X86_TRAP_AC, /* 17, Alignment Check */ | ||
112 | X86_TRAP_MC, /* 18, Machine Check */ | ||
113 | X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */ | ||
114 | X86_TRAP_IRET = 32, /* 32, IRET Exception */ | ||
115 | }; | ||
116 | |||
92 | #endif /* _ASM_X86_TRAPS_H */ | 117 | #endif /* _ASM_X86_TRAPS_H */ |
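
This enum exists so later hunks in this merge (irqinit.c, ptrace.c, dumpstack.c) can stop hard-coding vector numbers such as 16 and 1. A standalone check that the implicit enumerator values land where the comments say:

#include <assert.h>

enum { X86_TRAP_DE = 0, X86_TRAP_DB, X86_TRAP_NMI, X86_TRAP_BP,
       X86_TRAP_OF, X86_TRAP_BR, X86_TRAP_UD, X86_TRAP_NM,
       X86_TRAP_DF, X86_TRAP_OLD_MF, X86_TRAP_TS, X86_TRAP_NP,
       X86_TRAP_SS, X86_TRAP_GP, X86_TRAP_PF, X86_TRAP_SPURIOUS,
       X86_TRAP_MF, X86_TRAP_AC, X86_TRAP_MC, X86_TRAP_XF,
       X86_TRAP_IRET = 32 };

int main(void)
{
	/* the values irqinit.c and ptrace.c used to spell as 16 and 1 */
	assert(X86_TRAP_MF == 16 && X86_TRAP_DB == 1);
	return 0;
}
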
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h index 21f77b89e47a..37cdc9d99bb1 100644 --- a/arch/x86/include/asm/unistd.h +++ b/arch/x86/include/asm/unistd.h | |||
@@ -1,7 +1,17 @@ | |||
1 | #ifndef _ASM_X86_UNISTD_H | 1 | #ifndef _ASM_X86_UNISTD_H |
2 | #define _ASM_X86_UNISTD_H 1 | 2 | #define _ASM_X86_UNISTD_H 1 |
3 | 3 | ||
4 | /* x32 syscall flag bit */ | ||
5 | #define __X32_SYSCALL_BIT 0x40000000 | ||
6 | |||
4 | #ifdef __KERNEL__ | 7 | #ifdef __KERNEL__ |
8 | |||
9 | # ifdef CONFIG_X86_X32_ABI | ||
10 | # define __SYSCALL_MASK (~(__X32_SYSCALL_BIT)) | ||
11 | # else | ||
12 | # define __SYSCALL_MASK (~0) | ||
13 | # endif | ||
14 | |||
5 | # ifdef CONFIG_X86_32 | 15 | # ifdef CONFIG_X86_32 |
6 | 16 | ||
7 | # include <asm/unistd_32.h> | 17 | # include <asm/unistd_32.h> |
@@ -14,6 +24,7 @@ | |||
14 | # else | 24 | # else |
15 | 25 | ||
16 | # include <asm/unistd_64.h> | 26 | # include <asm/unistd_64.h> |
27 | # include <asm/unistd_64_x32.h> | ||
17 | # define __ARCH_WANT_COMPAT_SYS_TIME | 28 | # define __ARCH_WANT_COMPAT_SYS_TIME |
18 | 29 | ||
19 | # endif | 30 | # endif |
@@ -52,8 +63,10 @@ | |||
52 | #else | 63 | #else |
53 | # ifdef __i386__ | 64 | # ifdef __i386__ |
54 | # include <asm/unistd_32.h> | 65 | # include <asm/unistd_32.h> |
55 | # else | 66 | # elif defined(__LP64__) |
56 | # include <asm/unistd_64.h> | 67 | # include <asm/unistd_64.h> |
68 | # else | ||
69 | # include <asm/unistd_x32.h> | ||
57 | # endif | 70 | # endif |
58 | #endif | 71 | #endif |
59 | 72 | ||
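
The flag bit is the whole userspace-visible switch: an x32 binary issues syscall with nr | 0x40000000, and the kernel (see the entry_64.S and asm/syscall.h hunks) masks it back off to index the common table. A small 64-bit userspace probe; it assumes a kernel built with CONFIG_X86_X32_ABI and fails with -1/ENOSYS otherwise, which is safe here since getpid takes no pointer arguments:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define __X32_SYSCALL_BIT 0x40000000

int main(void)
{
	long pid = syscall(SYS_getpid | __X32_SYSCALL_BIT);

	printf("x32 getpid -> %ld\n", pid);	/* -1/ENOSYS without x32 */
	return 0;
}
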
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 834e897b1e25..1b4754f82ba7 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -1,6 +1,12 @@ | |||
1 | #include <asm/ia32.h> | 1 | #include <asm/ia32.h> |
2 | 2 | ||
3 | #define __SYSCALL_64(nr, sym, compat) [nr] = 1, | 3 | #define __SYSCALL_64(nr, sym, compat) [nr] = 1, |
4 | #define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1, | ||
5 | #ifdef CONFIG_X86_X32_ABI | ||
6 | # define __SYSCALL_X32(nr, sym, compat) [nr] = 1, | ||
7 | #else | ||
8 | # define __SYSCALL_X32(nr, sym, compat) /* nothing */ | ||
9 | #endif | ||
4 | static char syscalls_64[] = { | 10 | static char syscalls_64[] = { |
5 | #include <asm/syscalls_64.h> | 11 | #include <asm/syscalls_64.h> |
6 | }; | 12 | }; |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index d43cad74f166..c0f7d68d318f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -1044,6 +1044,9 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) = | |||
1044 | 1044 | ||
1045 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; | 1045 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; |
1046 | 1046 | ||
1047 | DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); | ||
1048 | EXPORT_PER_CPU_SYMBOL(fpu_owner_task); | ||
1049 | |||
1047 | /* | 1050 | /* |
1048 | * Special IST stacks which the CPU switches to when it calls | 1051 | * Special IST stacks which the CPU switches to when it calls |
1049 | * an IST-marked descriptor entry. Up to 7 stacks (hardware | 1052 | * an IST-marked descriptor entry. Up to 7 stacks (hardware |
@@ -1111,6 +1114,8 @@ void debug_stack_reset(void) | |||
1111 | 1114 | ||
1112 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; | 1115 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; |
1113 | EXPORT_PER_CPU_SYMBOL(current_task); | 1116 | EXPORT_PER_CPU_SYMBOL(current_task); |
1117 | DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); | ||
1118 | EXPORT_PER_CPU_SYMBOL(fpu_owner_task); | ||
1114 | 1119 | ||
1115 | #ifdef CONFIG_CC_STACKPROTECTOR | 1120 | #ifdef CONFIG_CC_STACKPROTECTOR |
1116 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); | 1121 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 6b45e5e7a901..73d08ed98a64 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) | |||
326 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; | 326 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; |
327 | } | 327 | } |
328 | 328 | ||
329 | static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, | 329 | static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) |
330 | int index) | ||
331 | { | 330 | { |
332 | int node; | 331 | int node; |
333 | 332 | ||
@@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); | |||
725 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) | 724 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) |
726 | 725 | ||
727 | #ifdef CONFIG_SMP | 726 | #ifdef CONFIG_SMP |
728 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 727 | |
728 | static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) | ||
729 | { | 729 | { |
730 | struct _cpuid4_info *this_leaf, *sibling_leaf; | 730 | struct _cpuid4_info *this_leaf; |
731 | unsigned long num_threads_sharing; | 731 | int ret, i, sibling; |
732 | int index_msb, i, sibling; | ||
733 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 732 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
734 | 733 | ||
735 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { | 734 | ret = 0; |
735 | if (index == 3) { | ||
736 | ret = 1; | ||
736 | for_each_cpu(i, cpu_llc_shared_mask(cpu)) { | 737 | for_each_cpu(i, cpu_llc_shared_mask(cpu)) { |
737 | if (!per_cpu(ici_cpuid4_info, i)) | 738 | if (!per_cpu(ici_cpuid4_info, i)) |
738 | continue; | 739 | continue; |
@@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
743 | set_bit(sibling, this_leaf->shared_cpu_map); | 744 | set_bit(sibling, this_leaf->shared_cpu_map); |
744 | } | 745 | } |
745 | } | 746 | } |
746 | return; | 747 | } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) { |
748 | ret = 1; | ||
749 | for_each_cpu(i, cpu_sibling_mask(cpu)) { | ||
750 | if (!per_cpu(ici_cpuid4_info, i)) | ||
751 | continue; | ||
752 | this_leaf = CPUID4_INFO_IDX(i, index); | ||
753 | for_each_cpu(sibling, cpu_sibling_mask(cpu)) { | ||
754 | if (!cpu_online(sibling)) | ||
755 | continue; | ||
756 | set_bit(sibling, this_leaf->shared_cpu_map); | ||
757 | } | ||
758 | } | ||
747 | } | 759 | } |
760 | |||
761 | return ret; | ||
762 | } | ||
763 | |||
764 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | ||
765 | { | ||
766 | struct _cpuid4_info *this_leaf, *sibling_leaf; | ||
767 | unsigned long num_threads_sharing; | ||
768 | int index_msb, i; | ||
769 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
770 | |||
771 | if (c->x86_vendor == X86_VENDOR_AMD) { | ||
772 | if (cache_shared_amd_cpu_map_setup(cpu, index)) | ||
773 | return; | ||
774 | } | ||
775 | |||
748 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 776 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
749 | num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; | 777 | num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; |
750 | 778 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 786e76a86322..e4eeaaf58a47 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -528,6 +528,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
528 | 528 | ||
529 | sprintf(name, "threshold_bank%i", bank); | 529 | sprintf(name, "threshold_bank%i", bank); |
530 | 530 | ||
531 | #ifdef CONFIG_SMP | ||
531 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 532 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
532 | i = cpumask_first(cpu_llc_shared_mask(cpu)); | 533 | i = cpumask_first(cpu_llc_shared_mask(cpu)); |
533 | 534 | ||
@@ -553,6 +554,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
553 | 554 | ||
554 | goto out; | 555 | goto out; |
555 | } | 556 | } |
557 | #endif | ||
556 | 558 | ||
557 | b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); | 559 | b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); |
558 | if (!b) { | 560 | if (!b) { |
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 79289632cb27..a041e094b8b9 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -167,6 +167,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
167 | { | 167 | { |
168 | int err = 0; | 168 | int err = 0; |
169 | mtrr_type type; | 169 | mtrr_type type; |
170 | unsigned long base; | ||
170 | unsigned long size; | 171 | unsigned long size; |
171 | struct mtrr_sentry sentry; | 172 | struct mtrr_sentry sentry; |
172 | struct mtrr_gentry gentry; | 173 | struct mtrr_gentry gentry; |
@@ -267,14 +268,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
267 | #endif | 268 | #endif |
268 | if (gentry.regnum >= num_var_ranges) | 269 | if (gentry.regnum >= num_var_ranges) |
269 | return -EINVAL; | 270 | return -EINVAL; |
270 | mtrr_if->get(gentry.regnum, &gentry.base, &size, &type); | 271 | mtrr_if->get(gentry.regnum, &base, &size, &type); |
271 | 272 | ||
272 | /* Hide entries that go above 4GB */ | 273 | /* Hide entries that go above 4GB */ |
273 | if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)) | 274 | if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)) |
274 | || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))) | 275 | || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))) |
275 | gentry.base = gentry.size = gentry.type = 0; | 276 | gentry.base = gentry.size = gentry.type = 0; |
276 | else { | 277 | else { |
277 | gentry.base <<= PAGE_SHIFT; | 278 | gentry.base = base << PAGE_SHIFT; |
278 | gentry.size = size << PAGE_SHIFT; | 279 | gentry.size = size << PAGE_SHIFT; |
279 | gentry.type = type; | 280 | gentry.type = type; |
280 | } | 281 | } |
@@ -321,11 +322,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
321 | #endif | 322 | #endif |
322 | if (gentry.regnum >= num_var_ranges) | 323 | if (gentry.regnum >= num_var_ranges) |
323 | return -EINVAL; | 324 | return -EINVAL; |
324 | mtrr_if->get(gentry.regnum, &gentry.base, &size, &type); | 325 | mtrr_if->get(gentry.regnum, &base, &size, &type); |
325 | /* Hide entries that would overflow */ | 326 | /* Hide entries that would overflow */ |
326 | if (size != (__typeof__(gentry.size))size) | 327 | if (size != (__typeof__(gentry.size))size) |
327 | gentry.base = gentry.size = gentry.type = 0; | 328 | gentry.base = gentry.size = gentry.type = 0; |
328 | else { | 329 | else { |
330 | gentry.base = base; | ||
329 | gentry.size = size; | 331 | gentry.size = size; |
330 | gentry.type = type; | 332 | gentry.type = type; |
331 | } | 333 | } |
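
The point of the new local is type width: mtrr_if->get() stores through an unsigned long *, while gentry.base can be a narrower field in the compat ioctl layouts, so taking &gentry.base risked an 8-byte store into a smaller slot. Reading into a properly typed local and assigning afterwards (with the same shift and hide-above-4GB logic) avoids that. A userspace sketch of the hazard, using an illustrative struct rather than the real mtrr layouts:

#include <stdio.h>

struct gentry_sketch { unsigned int base; unsigned int size; };

static void get_sketch(unsigned long *base) { *base = 0x1234567890UL; }

int main(void)
{
	struct gentry_sketch g = { 0, 0xdead };

	/* deliberately wrong: 8-byte store aimed at a 4-byte field */
	get_sketch((unsigned long *)&g.base);
	printf("size clobbered: %#x\n", g.size);

	unsigned long base;		/* right: properly typed local */
	get_sketch(&base);
	g.base = (unsigned int)base;
	printf("size intact: %#x\n", g.size);
	return 0;
}
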
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3c44b712380c..1c52bdbb9b8b 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <asm/apic.h> | 29 | #include <asm/apic.h> |
30 | #include <asm/stacktrace.h> | 30 | #include <asm/stacktrace.h> |
31 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
32 | #include <asm/compat.h> | ||
33 | #include <asm/smp.h> | 32 | #include <asm/smp.h> |
34 | #include <asm/alternative.h> | 33 | #include <asm/alternative.h> |
35 | #include <asm/timer.h> | 34 | #include <asm/timer.h> |
@@ -988,6 +987,9 @@ static void x86_pmu_start(struct perf_event *event, int flags) | |||
988 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 987 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
989 | int idx = event->hw.idx; | 988 | int idx = event->hw.idx; |
990 | 989 | ||
990 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | ||
991 | return; | ||
992 | |||
991 | if (WARN_ON_ONCE(idx == -1)) | 993 | if (WARN_ON_ONCE(idx == -1)) |
992 | return; | 994 | return; |
993 | 995 | ||
@@ -1674,6 +1676,9 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
1674 | } | 1676 | } |
1675 | 1677 | ||
1676 | #ifdef CONFIG_COMPAT | 1678 | #ifdef CONFIG_COMPAT |
1679 | |||
1680 | #include <asm/compat.h> | ||
1681 | |||
1677 | static inline int | 1682 | static inline int |
1678 | perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) | 1683 | perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) |
1679 | { | 1684 | { |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 513d617b93c4..82db83b5c3bc 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -147,7 +147,9 @@ struct cpu_hw_events { | |||
147 | /* | 147 | /* |
148 | * AMD specific bits | 148 | * AMD specific bits |
149 | */ | 149 | */ |
150 | struct amd_nb *amd_nb; | 150 | struct amd_nb *amd_nb; |
151 | /* Inverted mask of bits to clear in the perf_ctr ctrl registers */ | ||
152 | u64 perf_ctr_virt_mask; | ||
151 | 153 | ||
152 | void *kfree_on_online; | 154 | void *kfree_on_online; |
153 | }; | 155 | }; |
@@ -425,9 +427,11 @@ void x86_pmu_disable_all(void); | |||
425 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | 427 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, |
426 | u64 enable_mask) | 428 | u64 enable_mask) |
427 | { | 429 | { |
430 | u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); | ||
431 | |||
428 | if (hwc->extra_reg.reg) | 432 | if (hwc->extra_reg.reg) |
429 | wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); | 433 | wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); |
430 | wrmsrl(hwc->config_base, hwc->config | enable_mask); | 434 | wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); |
431 | } | 435 | } |
432 | 436 | ||
433 | void x86_pmu_enable_all(int added); | 437 | void x86_pmu_enable_all(int added); |
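
perf_ctr_virt_mask is an opt-out filter applied at the last moment before the event-select MSR is written: by default it clears nothing, and the AMD code below arms it with the Host-only bit whenever SVM is off. The arithmetic, modeled in userspace; the bit positions follow the AMD PMU event-select layout as I understand it, so treat them as illustrative:

#include <stdio.h>
#include <stdint.h>

#define HOSTONLY_BIT	(1ULL << 41)	/* AMD_PERFMON_EVENTSEL_HOSTONLY */
#define ENABLE_BIT	(1ULL << 22)	/* counter enable */

int main(void)
{
	uint64_t config = 0x76 | HOSTONLY_BIT;	/* caller asked for host-only */
	uint64_t disable_mask = HOSTONLY_BIT;	/* SVM disabled: filter it out */

	printf("msr value: %#llx\n",
	       (unsigned long long)((config | ENABLE_BIT) & ~disable_mask));
	return 0;
}
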
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 0397b23be8e9..67250a52430b 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/perf_event.h> | 1 | #include <linux/perf_event.h> |
2 | #include <linux/export.h> | ||
2 | #include <linux/types.h> | 3 | #include <linux/types.h> |
3 | #include <linux/init.h> | 4 | #include <linux/init.h> |
4 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu) | |||
357 | struct amd_nb *nb; | 358 | struct amd_nb *nb; |
358 | int i, nb_id; | 359 | int i, nb_id; |
359 | 360 | ||
360 | if (boot_cpu_data.x86_max_cores < 2) | 361 | cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; |
362 | |||
363 | if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15) | ||
361 | return; | 364 | return; |
362 | 365 | ||
363 | nb_id = amd_get_nb_id(cpu); | 366 | nb_id = amd_get_nb_id(cpu); |
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = { | |||
587 | .put_event_constraints = amd_put_event_constraints, | 590 | .put_event_constraints = amd_put_event_constraints, |
588 | 591 | ||
589 | .cpu_prepare = amd_pmu_cpu_prepare, | 592 | .cpu_prepare = amd_pmu_cpu_prepare, |
590 | .cpu_starting = amd_pmu_cpu_starting, | ||
591 | .cpu_dead = amd_pmu_cpu_dead, | 593 | .cpu_dead = amd_pmu_cpu_dead, |
592 | #endif | 594 | #endif |
595 | .cpu_starting = amd_pmu_cpu_starting, | ||
593 | }; | 596 | }; |
594 | 597 | ||
595 | __init int amd_pmu_init(void) | 598 | __init int amd_pmu_init(void) |
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void) | |||
621 | 624 | ||
622 | return 0; | 625 | return 0; |
623 | } | 626 | } |
627 | |||
628 | void amd_pmu_enable_virt(void) | ||
629 | { | ||
630 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
631 | |||
632 | cpuc->perf_ctr_virt_mask = 0; | ||
633 | |||
634 | /* Reload all events */ | ||
635 | x86_pmu_disable_all(); | ||
636 | x86_pmu_enable_all(0); | ||
637 | } | ||
638 | EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); | ||
639 | |||
640 | void amd_pmu_disable_virt(void) | ||
641 | { | ||
642 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
643 | |||
644 | /* | ||
645 | * We only mask out the Host-only bit so that host-only counting works | ||
646 | * when SVM is disabled. If someone sets up a guest-only counter when | ||
647 | * SVM is disabled the Guest-only bits still get set and the counter | ||
648 | * will not count anything. | ||
649 | */ | ||
650 | cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; | ||
651 | |||
652 | /* Reload all events */ | ||
653 | x86_pmu_disable_all(); | ||
654 | x86_pmu_enable_all(0); | ||
655 | } | ||
656 | EXPORT_SYMBOL_GPL(amd_pmu_disable_virt); | ||
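
The two exports are for the SVM host code to flip the Host-only filter on and off as virtualization support is enabled and disabled; note also that .cpu_starting moved below the #endif so the mask gets initialized even on !CONFIG_SMP builds. A kernel-context sketch of the intended callers; the surrounding function names are illustrative:

/* While SVM is off the Host-only filter stays armed, so a guest-only
 * counter counts nothing rather than leaking host activity. */
static void svm_hardware_enable_sketch(void)
{
	/* ... set EFER.SVME ... */
	amd_pmu_enable_virt();		/* drop the filter, reload events */
}

static void svm_hardware_disable_sketch(void)
{
	amd_pmu_disable_virt();		/* re-arm the filter, reload events */
	/* ... clear EFER.SVME ... */
}
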
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 4025fe4f928f..28f98706b08b 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -265,7 +265,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) | |||
265 | #endif | 265 | #endif |
266 | printk("\n"); | 266 | printk("\n"); |
267 | if (notify_die(DIE_OOPS, str, regs, err, | 267 | if (notify_die(DIE_OOPS, str, regs, err, |
268 | current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) | 268 | current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) |
269 | return 1; | 269 | return 1; |
270 | 270 | ||
271 | show_registers(regs); | 271 | show_registers(regs); |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 3fe8239fd8fb..2925e14fb1d9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -482,7 +482,12 @@ GLOBAL(system_call_after_swapgs) | |||
482 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 482 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
483 | jnz tracesys | 483 | jnz tracesys |
484 | system_call_fastpath: | 484 | system_call_fastpath: |
485 | #if __SYSCALL_MASK == ~0 | ||
485 | cmpq $__NR_syscall_max,%rax | 486 | cmpq $__NR_syscall_max,%rax |
487 | #else | ||
488 | andl $__SYSCALL_MASK,%eax | ||
489 | cmpl $__NR_syscall_max,%eax | ||
490 | #endif | ||
486 | ja badsys | 491 | ja badsys |
487 | movq %r10,%rcx | 492 | movq %r10,%rcx |
488 | call *sys_call_table(,%rax,8) # XXX: rip relative | 493 | call *sys_call_table(,%rax,8) # XXX: rip relative |
@@ -596,7 +601,12 @@ tracesys: | |||
596 | */ | 601 | */ |
597 | LOAD_ARGS ARGOFFSET, 1 | 602 | LOAD_ARGS ARGOFFSET, 1 |
598 | RESTORE_REST | 603 | RESTORE_REST |
604 | #if __SYSCALL_MASK == ~0 | ||
599 | cmpq $__NR_syscall_max,%rax | 605 | cmpq $__NR_syscall_max,%rax |
606 | #else | ||
607 | andl $__SYSCALL_MASK,%eax | ||
608 | cmpl $__NR_syscall_max,%eax | ||
609 | #endif | ||
600 | ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ | 610 | ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ |
601 | movq %r10,%rcx /* fixup for C */ | 611 | movq %r10,%rcx /* fixup for C */ |
602 | call *sys_call_table(,%rax,8) | 612 | call *sys_call_table(,%rax,8) |
@@ -736,6 +746,40 @@ ENTRY(stub_rt_sigreturn) | |||
736 | CFI_ENDPROC | 746 | CFI_ENDPROC |
737 | END(stub_rt_sigreturn) | 747 | END(stub_rt_sigreturn) |
738 | 748 | ||
749 | #ifdef CONFIG_X86_X32_ABI | ||
750 | PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx | ||
751 | |||
752 | ENTRY(stub_x32_rt_sigreturn) | ||
753 | CFI_STARTPROC | ||
754 | addq $8, %rsp | ||
755 | PARTIAL_FRAME 0 | ||
756 | SAVE_REST | ||
757 | movq %rsp,%rdi | ||
758 | FIXUP_TOP_OF_STACK %r11 | ||
759 | call sys32_x32_rt_sigreturn | ||
760 | movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer | ||
761 | RESTORE_REST | ||
762 | jmp int_ret_from_sys_call | ||
763 | CFI_ENDPROC | ||
764 | END(stub_x32_rt_sigreturn) | ||
765 | |||
766 | ENTRY(stub_x32_execve) | ||
767 | CFI_STARTPROC | ||
768 | addq $8, %rsp | ||
769 | PARTIAL_FRAME 0 | ||
770 | SAVE_REST | ||
771 | FIXUP_TOP_OF_STACK %r11 | ||
772 | movq %rsp, %rcx | ||
773 | call sys32_execve | ||
774 | RESTORE_TOP_OF_STACK %r11 | ||
775 | movq %rax,RAX(%rsp) | ||
776 | RESTORE_REST | ||
777 | jmp int_ret_from_sys_call | ||
778 | CFI_ENDPROC | ||
779 | END(stub_x32_execve) | ||
780 | |||
781 | #endif | ||
782 | |||
739 | /* | 783 | /* |
740 | * Build the entry stubs and pointer table with some assembler magic. | 784 | * Build the entry stubs and pointer table with some assembler magic. |
741 | * We pack 7 stubs into a single 32-byte chunk, which will fit in a | 785 | * We pack 7 stubs into a single 32-byte chunk, which will fit in a |
@@ -1532,10 +1576,17 @@ ENTRY(nmi) | |||
1532 | pushq_cfi %rdx | 1576 | pushq_cfi %rdx |
1533 | 1577 | ||
1534 | /* | 1578 | /* |
1579 | * If %cs was not the kernel segment, then the NMI triggered in user | ||
1580 | * space, which means it is definitely not nested. | ||
1581 | */ | ||
1582 | cmpl $__KERNEL_CS, 16(%rsp) | ||
1583 | jne first_nmi | ||
1584 | |||
1585 | /* | ||
1535 | * Check the special variable on the stack to see if NMIs are | 1586 | * Check the special variable on the stack to see if NMIs are |
1536 | * executing. | 1587 | * executing. |
1537 | */ | 1588 | */ |
1538 | cmp $1, -8(%rsp) | 1589 | cmpl $1, -8(%rsp) |
1539 | je nested_nmi | 1590 | je nested_nmi |
1540 | 1591 | ||
1541 | /* | 1592 | /* |
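
Two things to note in this file: the syscall fast path only pays for the x32 mask when __SYSCALL_MASK is not ~0, so a non-x32 kernel keeps its single cmpq, and the NMI path now bails to first_nmi immediately when the interrupted %cs was not the kernel's, since an NMI that hit user mode cannot be nested. A C rendering of the fast-path dispatch, simplified from the assembly above (kernel-context sketch; argument passing and return plumbing omitted):

typedef long (*sys_call_ptr_t)(void);
extern const sys_call_ptr_t sys_call_table[];

static long dispatch_sketch(unsigned long rax)
{
#if __SYSCALL_MASK == ~0
	if (rax > __NR_syscall_max)	/* plain 64-bit bounds check */
		return -ENOSYS;
#else
	rax &= __SYSCALL_MASK;		/* strip __X32_SYSCALL_BIT first */
	if (rax > __NR_syscall_max)
		return -ENOSYS;
#endif
	return sys_call_table[rax]();
}
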
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 313fb5cddbce..7b77062dea11 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -61,7 +61,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) | |||
61 | outb(0, 0xF0); | 61 | outb(0, 0xF0); |
62 | if (ignore_fpu_irq || !boot_cpu_data.hard_math) | 62 | if (ignore_fpu_irq || !boot_cpu_data.hard_math) |
63 | return IRQ_NONE; | 63 | return IRQ_NONE; |
64 | math_error(get_irq_regs(), 0, 16); | 64 | math_error(get_irq_regs(), 0, X86_TRAP_MF); |
65 | return IRQ_HANDLED; | 65 | return IRQ_HANDLED; |
66 | } | 66 | } |
67 | 67 | ||
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index ac0417be9131..73465aab28f8 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -360,7 +360,6 @@ out: | |||
360 | static enum ucode_state | 360 | static enum ucode_state |
361 | request_microcode_user(int cpu, const void __user *buf, size_t size) | 361 | request_microcode_user(int cpu, const void __user *buf, size_t size) |
362 | { | 362 | { |
363 | pr_info("AMD microcode update via /dev/cpu/microcode not supported\n"); | ||
364 | return UCODE_ERROR; | 363 | return UCODE_ERROR; |
365 | } | 364 | } |
366 | 365 | ||
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 485204f58cda..c08d1ff12b7c 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -214,6 +214,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
214 | 214 | ||
215 | task_user_gs(p) = get_user_gs(regs); | 215 | task_user_gs(p) = get_user_gs(regs); |
216 | 216 | ||
217 | p->fpu_counter = 0; | ||
217 | p->thread.io_bitmap_ptr = NULL; | 218 | p->thread.io_bitmap_ptr = NULL; |
218 | tsk = current; | 219 | tsk = current; |
219 | err = -ENOMEM; | 220 | err = -ENOMEM; |
@@ -299,22 +300,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
299 | *next = &next_p->thread; | 300 | *next = &next_p->thread; |
300 | int cpu = smp_processor_id(); | 301 | int cpu = smp_processor_id(); |
301 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 302 | struct tss_struct *tss = &per_cpu(init_tss, cpu); |
302 | bool preload_fpu; | 303 | fpu_switch_t fpu; |
303 | 304 | ||
304 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ | 305 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ |
305 | 306 | ||
306 | /* | 307 | fpu = switch_fpu_prepare(prev_p, next_p, cpu); |
307 | * If the task has used fpu the last 5 timeslices, just do a full | ||
308 | * restore of the math state immediately to avoid the trap; the | ||
309 | * chances of needing FPU soon are obviously high now | ||
310 | */ | ||
311 | preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; | ||
312 | |||
313 | __unlazy_fpu(prev_p); | ||
314 | |||
315 | /* we're going to use this soon, after a few expensive things */ | ||
316 | if (preload_fpu) | ||
317 | prefetch(next->fpu.state); | ||
318 | 308 | ||
319 | /* | 309 | /* |
320 | * Reload esp0. | 310 | * Reload esp0. |
@@ -354,11 +344,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
354 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) | 344 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) |
355 | __switch_to_xtra(prev_p, next_p, tss); | 345 | __switch_to_xtra(prev_p, next_p, tss); |
356 | 346 | ||
357 | /* If we're going to preload the fpu context, make sure clts | ||
358 | is run while we're batching the cpu state updates. */ | ||
359 | if (preload_fpu) | ||
360 | clts(); | ||
361 | |||
362 | /* | 347 | /* |
363 | * Leave lazy mode, flushing any hypercalls made here. | 348 | * Leave lazy mode, flushing any hypercalls made here. |
364 | * This must be done before restoring TLS segments so | 349 | * This must be done before restoring TLS segments so |
@@ -368,15 +353,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
368 | */ | 353 | */ |
369 | arch_end_context_switch(next_p); | 354 | arch_end_context_switch(next_p); |
370 | 355 | ||
371 | if (preload_fpu) | ||
372 | __math_state_restore(); | ||
373 | |||
374 | /* | 356 | /* |
375 | * Restore %gs if needed (which is common) | 357 | * Restore %gs if needed (which is common) |
376 | */ | 358 | */ |
377 | if (prev->gs | next->gs) | 359 | if (prev->gs | next->gs) |
378 | lazy_load_gs(next->gs); | 360 | lazy_load_gs(next->gs); |
379 | 361 | ||
362 | switch_fpu_finish(next_p, fpu); | ||
363 | |||
380 | percpu_write(current_task, next_p); | 364 | percpu_write(current_task, next_p); |
381 | 365 | ||
382 | return prev_p; | 366 | return prev_p; |
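
The FPU bookkeeping that used to be open-coded here (the fpu_counter > 5 preload heuristic, __unlazy_fpu(), the clts() and __math_state_restore() pair) moves behind two helpers shared with the 64-bit path, split so the decision happens before the segment and TLS reloads and the actual restore after. A hedged sketch of the shape of those helpers; the real definitions live in asm/i387.h in this series and track ownership via fpu_owner_task, and every _sketch name below is a simplified placeholder:

typedef struct { int preload; } fpu_switch_t;

static fpu_switch_t switch_fpu_prepare_sketch(struct task_struct *old,
					      struct task_struct *new, int cpu)
{
	fpu_switch_t f;

	/* same heuristic the old inline code used */
	f.preload = tsk_used_math(new) && new->fpu_counter > 5;
	unlazy_fpu_sketch(old);			/* save old state, drop ownership */
	if (f.preload)
		prefetch(new->thread.fpu.state);	/* warm the restore path */
	return f;
}

static void switch_fpu_finish_sketch(struct task_struct *new, fpu_switch_t f)
{
	if (f.preload)
		math_state_restore_sketch(new);	/* restore once 'new' is current */
}
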
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 9b9fe4a85c87..550e77b1b948 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -286,6 +286,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
286 | 286 | ||
287 | set_tsk_thread_flag(p, TIF_FORK); | 287 | set_tsk_thread_flag(p, TIF_FORK); |
288 | 288 | ||
289 | p->fpu_counter = 0; | ||
289 | p->thread.io_bitmap_ptr = NULL; | 290 | p->thread.io_bitmap_ptr = NULL; |
290 | 291 | ||
291 | savesegment(gs, p->thread.gsindex); | 292 | savesegment(gs, p->thread.gsindex); |
@@ -364,7 +365,9 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | |||
364 | void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp) | 365 | void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp) |
365 | { | 366 | { |
366 | start_thread_common(regs, new_ip, new_sp, | 367 | start_thread_common(regs, new_ip, new_sp, |
367 | __USER32_CS, __USER32_DS, __USER32_DS); | 368 | test_thread_flag(TIF_X32) |
369 | ? __USER_CS : __USER32_CS, | ||
370 | __USER_DS, __USER_DS); | ||
368 | } | 371 | } |
369 | #endif | 372 | #endif |
370 | 373 | ||
@@ -386,18 +389,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
386 | int cpu = smp_processor_id(); | 389 | int cpu = smp_processor_id(); |
387 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 390 | struct tss_struct *tss = &per_cpu(init_tss, cpu); |
388 | unsigned fsindex, gsindex; | 391 | unsigned fsindex, gsindex; |
389 | bool preload_fpu; | 392 | fpu_switch_t fpu; |
390 | 393 | ||
391 | /* | 394 | fpu = switch_fpu_prepare(prev_p, next_p, cpu); |
392 | * If the task has used fpu the last 5 timeslices, just do a full | ||
393 | * restore of the math state immediately to avoid the trap; the | ||
394 | * chances of needing FPU soon are obviously high now | ||
395 | */ | ||
396 | preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; | ||
397 | |||
398 | /* we're going to use this soon, after a few expensive things */ | ||
399 | if (preload_fpu) | ||
400 | prefetch(next->fpu.state); | ||
401 | 395 | ||
402 | /* | 396 | /* |
403 | * Reload esp0, LDT and the page table pointer: | 397 | * Reload esp0, LDT and the page table pointer: |
@@ -427,13 +421,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
427 | 421 | ||
428 | load_TLS(next, cpu); | 422 | load_TLS(next, cpu); |
429 | 423 | ||
430 | /* Must be after DS reload */ | ||
431 | __unlazy_fpu(prev_p); | ||
432 | |||
433 | /* Make sure cpu is ready for new context */ | ||
434 | if (preload_fpu) | ||
435 | clts(); | ||
436 | |||
437 | /* | 424 | /* |
438 | * Leave lazy mode, flushing any hypercalls made here. | 425 | * Leave lazy mode, flushing any hypercalls made here. |
439 | * This must be done before restoring TLS segments so | 426 | * This must be done before restoring TLS segments so |
@@ -474,6 +461,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
474 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | 461 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); |
475 | prev->gsindex = gsindex; | 462 | prev->gsindex = gsindex; |
476 | 463 | ||
464 | switch_fpu_finish(next_p, fpu); | ||
465 | |||
477 | /* | 466 | /* |
478 | * Switch the PDA and FPU contexts. | 467 | * Switch the PDA and FPU contexts. |
479 | */ | 468 | */ |
@@ -492,13 +481,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
492 | task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) | 481 | task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) |
493 | __switch_to_xtra(prev_p, next_p, tss); | 482 | __switch_to_xtra(prev_p, next_p, tss); |
494 | 483 | ||
495 | /* | ||
496 | * Preload the FPU context, now that we've determined that the | ||
497 | * task is likely to be using it. | ||
498 | */ | ||
499 | if (preload_fpu) | ||
500 | __math_state_restore(); | ||
501 | |||
502 | return prev_p; | 484 | return prev_p; |
503 | } | 485 | } |
504 | 486 | ||
@@ -508,6 +490,8 @@ void set_personality_64bit(void) | |||
508 | 490 | ||
509 | /* Make sure to be in 64bit mode */ | 491 | /* Make sure to be in 64bit mode */ |
510 | clear_thread_flag(TIF_IA32); | 492 | clear_thread_flag(TIF_IA32); |
493 | clear_thread_flag(TIF_ADDR32); | ||
494 | clear_thread_flag(TIF_X32); | ||
511 | 495 | ||
512 | /* Ensure the corresponding mm is not marked. */ | 496 | /* Ensure the corresponding mm is not marked. */ |
513 | if (current->mm) | 497 | if (current->mm) |
@@ -520,20 +504,31 @@ void set_personality_64bit(void) | |||
520 | current->personality &= ~READ_IMPLIES_EXEC; | 504 | current->personality &= ~READ_IMPLIES_EXEC; |
521 | } | 505 | } |
522 | 506 | ||
523 | void set_personality_ia32(void) | 507 | void set_personality_ia32(bool x32) |
524 | { | 508 | { |
525 | /* inherit personality from parent */ | 509 | /* inherit personality from parent */ |
526 | 510 | ||
527 | /* Make sure to be in 32bit mode */ | 511 | /* Make sure to be in 32bit mode */ |
528 | set_thread_flag(TIF_IA32); | 512 | set_thread_flag(TIF_ADDR32); |
529 | current->personality |= force_personality32; | ||
530 | 513 | ||
531 | /* Mark the associated mm as containing 32-bit tasks. */ | 514 | /* Mark the associated mm as containing 32-bit tasks. */ |
532 | if (current->mm) | 515 | if (current->mm) |
533 | current->mm->context.ia32_compat = 1; | 516 | current->mm->context.ia32_compat = 1; |
534 | 517 | ||
535 | /* Prepare the first "return" to user space */ | 518 | if (x32) { |
536 | current_thread_info()->status |= TS_COMPAT; | 519 | clear_thread_flag(TIF_IA32); |
520 | set_thread_flag(TIF_X32); | ||
521 | current->personality &= ~READ_IMPLIES_EXEC; | ||
522 | /* is_compat_task() uses the presence of the x32 | ||
523 | syscall bit flag to determine compat status */ | ||
524 | current_thread_info()->status &= ~TS_COMPAT; | ||
525 | } else { | ||
526 | set_thread_flag(TIF_IA32); | ||
527 | clear_thread_flag(TIF_X32); | ||
528 | current->personality |= force_personality32; | ||
529 | /* Prepare the first "return" to user space */ | ||
530 | current_thread_info()->status |= TS_COMPAT; | ||
531 | } | ||
537 | } | 532 | } |
538 | 533 | ||
539 | unsigned long get_wchan(struct task_struct *p) | 534 | unsigned long get_wchan(struct task_struct *p) |
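
set_personality_ia32() now covers two ABIs, distinguished by the bool: both get TIF_ADDR32 and a compat-marked mm, but only the ia32 case keeps the persistent TS_COMPAT status, x32 relying on the syscall flag bit instead; note also the start_thread_ia32() hunk above, which gives x32 tasks the 64-bit %cs. A userspace model of the flag matrix (field names mirror the thread flags; the encoding is only for illustration):

#include <stdbool.h>
#include <stdio.h>

struct personality_sketch { bool ia32, x32, addr32, ts_compat; };

static void set_personality_ia32_sketch(struct personality_sketch *t, bool is_x32)
{
	t->addr32 = true;		/* both ABIs: 32-bit address space */
	if (is_x32) {
		t->ia32 = false;
		t->x32 = true;
		t->ts_compat = false;	/* compat status rides on the syscall bit */
	} else {
		t->ia32 = true;
		t->x32 = false;
		t->ts_compat = true;	/* first return to user space is compat */
	}
}

int main(void)
{
	struct personality_sketch t = { 0 };

	set_personality_ia32_sketch(&t, true);
	printf("x32: ia32=%d x32=%d addr32=%d compat=%d\n",
	       t.ia32, t.x32, t.addr32, t.ts_compat);
	return 0;
}
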
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 50267386b766..6fb330adc7c7 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/prctl.h> | 33 | #include <asm/prctl.h> |
34 | #include <asm/proto.h> | 34 | #include <asm/proto.h> |
35 | #include <asm/hw_breakpoint.h> | 35 | #include <asm/hw_breakpoint.h> |
36 | #include <asm/traps.h> | ||
36 | 37 | ||
37 | #include "tls.h" | 38 | #include "tls.h" |
38 | 39 | ||
@@ -1130,6 +1131,100 @@ static int genregs32_set(struct task_struct *target, | |||
1130 | return ret; | 1131 | return ret; |
1131 | } | 1132 | } |
1132 | 1133 | ||
1134 | #ifdef CONFIG_X86_X32_ABI | ||
1135 | static long x32_arch_ptrace(struct task_struct *child, | ||
1136 | compat_long_t request, compat_ulong_t caddr, | ||
1137 | compat_ulong_t cdata) | ||
1138 | { | ||
1139 | unsigned long addr = caddr; | ||
1140 | unsigned long data = cdata; | ||
1141 | void __user *datap = compat_ptr(data); | ||
1142 | int ret; | ||
1143 | |||
1144 | switch (request) { | ||
1145 | /* Read 32bits at location addr in the USER area. Only allow | ||
1146 | to return the lower 32bits of segment and debug registers. */ | ||
1147 | case PTRACE_PEEKUSR: { | ||
1148 | u32 tmp; | ||
1149 | |||
1150 | ret = -EIO; | ||
1151 | if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) || | ||
1152 | addr < offsetof(struct user_regs_struct, cs)) | ||
1153 | break; | ||
1154 | |||
1155 | tmp = 0; /* Default return condition */ | ||
1156 | if (addr < sizeof(struct user_regs_struct)) | ||
1157 | tmp = getreg(child, addr); | ||
1158 | else if (addr >= offsetof(struct user, u_debugreg[0]) && | ||
1159 | addr <= offsetof(struct user, u_debugreg[7])) { | ||
1160 | addr -= offsetof(struct user, u_debugreg[0]); | ||
1161 | tmp = ptrace_get_debugreg(child, addr / sizeof(data)); | ||
1162 | } | ||
1163 | ret = put_user(tmp, (__u32 __user *)datap); | ||
1164 | break; | ||
1165 | } | ||
1166 | |||
1167 | /* Write the word at location addr in the USER area. Only allow | ||
1168 | to update segment and debug registers with the upper 32bits | ||
1169 | zero-extended. */ | ||
1170 | case PTRACE_POKEUSR: | ||
1171 | ret = -EIO; | ||
1172 | if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) || | ||
1173 | addr < offsetof(struct user_regs_struct, cs)) | ||
1174 | break; | ||
1175 | |||
1176 | if (addr < sizeof(struct user_regs_struct)) | ||
1177 | ret = putreg(child, addr, data); | ||
1178 | else if (addr >= offsetof(struct user, u_debugreg[0]) && | ||
1179 | addr <= offsetof(struct user, u_debugreg[7])) { | ||
1180 | addr -= offsetof(struct user, u_debugreg[0]); | ||
1181 | ret = ptrace_set_debugreg(child, | ||
1182 | addr / sizeof(data), data); | ||
1183 | } | ||
1184 | break; | ||
1185 | |||
1186 | case PTRACE_GETREGS: /* Get all gp regs from the child. */ | ||
1187 | return copy_regset_to_user(child, | ||
1188 | task_user_regset_view(current), | ||
1189 | REGSET_GENERAL, | ||
1190 | 0, sizeof(struct user_regs_struct), | ||
1191 | datap); | ||
1192 | |||
1193 | case PTRACE_SETREGS: /* Set all gp regs in the child. */ | ||
1194 | return copy_regset_from_user(child, | ||
1195 | task_user_regset_view(current), | ||
1196 | REGSET_GENERAL, | ||
1197 | 0, sizeof(struct user_regs_struct), | ||
1198 | datap); | ||
1199 | |||
1200 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ | ||
1201 | return copy_regset_to_user(child, | ||
1202 | task_user_regset_view(current), | ||
1203 | REGSET_FP, | ||
1204 | 0, sizeof(struct user_i387_struct), | ||
1205 | datap); | ||
1206 | |||
1207 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ | ||
1208 | return copy_regset_from_user(child, | ||
1209 | task_user_regset_view(current), | ||
1210 | REGSET_FP, | ||
1211 | 0, sizeof(struct user_i387_struct), | ||
1212 | datap); | ||
1213 | |||
1214 | /* normal 64bit interface to access TLS data. | ||
1215 | Works just like arch_prctl, except that the arguments | ||
1216 | are reversed. */ | ||
1217 | case PTRACE_ARCH_PRCTL: | ||
1218 | return do_arch_prctl(child, data, addr); | ||
1219 | |||
1220 | default: | ||
1221 | return compat_ptrace_request(child, request, addr, data); | ||
1222 | } | ||
1223 | |||
1224 | return ret; | ||
1225 | } | ||
1226 | #endif | ||
1227 | |||
1133 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | 1228 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, |
1134 | compat_ulong_t caddr, compat_ulong_t cdata) | 1229 | compat_ulong_t caddr, compat_ulong_t cdata) |
1135 | { | 1230 | { |
@@ -1139,6 +1234,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
1139 | int ret; | 1234 | int ret; |
1140 | __u32 val; | 1235 | __u32 val; |
1141 | 1236 | ||
1237 | #ifdef CONFIG_X86_X32_ABI | ||
1238 | if (!is_ia32_task()) | ||
1239 | return x32_arch_ptrace(child, request, caddr, cdata); | ||
1240 | #endif | ||
1241 | |||
1142 | switch (request) { | 1242 | switch (request) { |
1143 | case PTRACE_PEEKUSR: | 1243 | case PTRACE_PEEKUSR: |
1144 | ret = getreg32(child, addr, &val); | 1244 | ret = getreg32(child, addr, &val); |
@@ -1326,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk, | |||
1326 | int error_code, int si_code, | 1426 | int error_code, int si_code, |
1327 | struct siginfo *info) | 1427 | struct siginfo *info) |
1328 | { | 1428 | { |
1329 | tsk->thread.trap_no = 1; | 1429 | tsk->thread.trap_nr = X86_TRAP_DB; |
1330 | tsk->thread.error_code = error_code; | 1430 | tsk->thread.error_code = error_code; |
1331 | 1431 | ||
1332 | memset(info, 0, sizeof(*info)); | 1432 | memset(info, 0, sizeof(*info)); |
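
Because is_ia32_task() is false for x32 tracees, compat_arch_ptrace() forwards them to x32_arch_ptrace(), which takes compat-sized arguments but exposes the 64-bit register layout; note that PTRACE_PEEKUSR/POKEUSR only accept offsets from cs onward (segment and debug registers), so general registers go through the regset requests. A userspace sketch of a tracer using that regset path, runnable as an ordinary 64-bit program:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct user_regs_struct regs;
	pid_t pid = fork();

	if (pid == 0) {				/* tracee */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}
	waitpid(pid, NULL, 0);			/* wait for the stop */
	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == 0)
		printf("tracee rip: %#llx\n", (unsigned long long)regs.rip);
	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}
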
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 46a01bdc27e2..9c73acc1c860 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -10,10 +10,8 @@ | |||
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/signal.h> | ||
14 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
15 | #include <linux/wait.h> | 14 | #include <linux/wait.h> |
16 | #include <linux/ptrace.h> | ||
17 | #include <linux/tracehook.h> | 15 | #include <linux/tracehook.h> |
18 | #include <linux/unistd.h> | 16 | #include <linux/unistd.h> |
19 | #include <linux/stddef.h> | 17 | #include <linux/stddef.h> |
@@ -26,10 +24,12 @@ | |||
26 | #include <asm/i387.h> | 24 | #include <asm/i387.h> |
27 | #include <asm/vdso.h> | 25 | #include <asm/vdso.h> |
28 | #include <asm/mce.h> | 26 | #include <asm/mce.h> |
27 | #include <asm/sighandling.h> | ||
29 | 28 | ||
30 | #ifdef CONFIG_X86_64 | 29 | #ifdef CONFIG_X86_64 |
31 | #include <asm/proto.h> | 30 | #include <asm/proto.h> |
32 | #include <asm/ia32_unistd.h> | 31 | #include <asm/ia32_unistd.h> |
32 | #include <asm/sys_ia32.h> | ||
33 | #endif /* CONFIG_X86_64 */ | 33 | #endif /* CONFIG_X86_64 */ |
34 | 34 | ||
35 | #include <asm/syscall.h> | 35 | #include <asm/syscall.h> |
@@ -37,13 +37,6 @@ | |||
37 | 37 | ||
38 | #include <asm/sigframe.h> | 38 | #include <asm/sigframe.h> |
39 | 39 | ||
40 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
41 | |||
42 | #define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ | ||
43 | X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ | ||
44 | X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ | ||
45 | X86_EFLAGS_CF) | ||
46 | |||
47 | #ifdef CONFIG_X86_32 | 40 | #ifdef CONFIG_X86_32 |
48 | # define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) | 41 | # define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) |
49 | #else | 42 | #else |
@@ -68,9 +61,8 @@ | |||
68 | regs->seg = GET_SEG(seg) | 3; \ | 61 | regs->seg = GET_SEG(seg) | 3; \ |
69 | } while (0) | 62 | } while (0) |
70 | 63 | ||
71 | static int | 64 | int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, |
72 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | 65 | unsigned long *pax) |
73 | unsigned long *pax) | ||
74 | { | 66 | { |
75 | void __user *buf; | 67 | void __user *buf; |
76 | unsigned int tmpflags; | 68 | unsigned int tmpflags; |
@@ -125,9 +117,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
125 | return err; | 117 | return err; |
126 | } | 118 | } |
127 | 119 | ||
128 | static int | 120 | int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, |
129 | setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, | 121 | struct pt_regs *regs, unsigned long mask) |
130 | struct pt_regs *regs, unsigned long mask) | ||
131 | { | 122 | { |
132 | int err = 0; | 123 | int err = 0; |
133 | 124 | ||
@@ -159,7 +150,7 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, | |||
159 | put_user_ex(regs->r15, &sc->r15); | 150 | put_user_ex(regs->r15, &sc->r15); |
160 | #endif /* CONFIG_X86_64 */ | 151 | #endif /* CONFIG_X86_64 */ |
161 | 152 | ||
162 | put_user_ex(current->thread.trap_no, &sc->trapno); | 153 | put_user_ex(current->thread.trap_nr, &sc->trapno); |
163 | put_user_ex(current->thread.error_code, &sc->err); | 154 | put_user_ex(current->thread.error_code, &sc->err); |
164 | put_user_ex(regs->ip, &sc->ip); | 155 | put_user_ex(regs->ip, &sc->ip); |
165 | #ifdef CONFIG_X86_32 | 156 | #ifdef CONFIG_X86_32 |
@@ -642,6 +633,16 @@ static int signr_convert(int sig) | |||
642 | #define is_ia32 0 | 633 | #define is_ia32 0 |
643 | #endif /* CONFIG_IA32_EMULATION */ | 634 | #endif /* CONFIG_IA32_EMULATION */ |
644 | 635 | ||
636 | #ifdef CONFIG_X86_X32_ABI | ||
637 | #define is_x32 test_thread_flag(TIF_X32) | ||
638 | |||
639 | static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, | ||
640 | siginfo_t *info, compat_sigset_t *set, | ||
641 | struct pt_regs *regs); | ||
642 | #else /* !CONFIG_X86_X32_ABI */ | ||
643 | #define is_x32 0 | ||
644 | #endif /* CONFIG_X86_X32_ABI */ | ||
645 | |||
645 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 646 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
646 | sigset_t *set, struct pt_regs *regs); | 647 | sigset_t *set, struct pt_regs *regs); |
647 | int ia32_setup_frame(int sig, struct k_sigaction *ka, | 648 | int ia32_setup_frame(int sig, struct k_sigaction *ka, |
@@ -666,8 +667,14 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
666 | ret = ia32_setup_rt_frame(usig, ka, info, set, regs); | 667 | ret = ia32_setup_rt_frame(usig, ka, info, set, regs); |
667 | else | 668 | else |
668 | ret = ia32_setup_frame(usig, ka, set, regs); | 669 | ret = ia32_setup_frame(usig, ka, set, regs); |
669 | } else | 670 | #ifdef CONFIG_X86_X32_ABI |
671 | } else if (is_x32) { | ||
672 | ret = x32_setup_rt_frame(usig, ka, info, | ||
673 | (compat_sigset_t *)set, regs); | ||
674 | #endif | ||
675 | } else { | ||
670 | ret = __setup_rt_frame(sig, ka, info, set, regs); | 676 | ret = __setup_rt_frame(sig, ka, info, set, regs); |
677 | } | ||
671 | 678 | ||
672 | if (ret) { | 679 | if (ret) { |
673 | force_sigsegv(sig, current); | 680 | force_sigsegv(sig, current); |
@@ -850,3 +857,102 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where) | |||
850 | 857 | ||
851 | force_sig(SIGSEGV, me); | 858 | force_sig(SIGSEGV, me); |
852 | } | 859 | } |
860 | |||
861 | #ifdef CONFIG_X86_X32_ABI | ||
862 | static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, | ||
863 | siginfo_t *info, compat_sigset_t *set, | ||
864 | struct pt_regs *regs) | ||
865 | { | ||
866 | struct rt_sigframe_x32 __user *frame; | ||
867 | void __user *restorer; | ||
868 | int err = 0; | ||
869 | void __user *fpstate = NULL; | ||
870 | |||
871 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); | ||
872 | |||
873 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
874 | return -EFAULT; | ||
875 | |||
876 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
877 | if (copy_siginfo_to_user32(&frame->info, info)) | ||
878 | return -EFAULT; | ||
879 | } | ||
880 | |||
881 | put_user_try { | ||
882 | /* Create the ucontext. */ | ||
883 | if (cpu_has_xsave) | ||
884 | put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); | ||
885 | else | ||
886 | put_user_ex(0, &frame->uc.uc_flags); | ||
887 | put_user_ex(0, &frame->uc.uc_link); | ||
888 | put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | ||
889 | put_user_ex(sas_ss_flags(regs->sp), | ||
890 | &frame->uc.uc_stack.ss_flags); | ||
891 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
892 | put_user_ex(0, &frame->uc.uc__pad0); | ||
893 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, | ||
894 | regs, set->sig[0]); | ||
895 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
896 | |||
897 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
898 | restorer = ka->sa.sa_restorer; | ||
899 | } else { | ||
900 | /* could use a vstub here */ | ||
901 | restorer = NULL; | ||
902 | err |= -EFAULT; | ||
903 | } | ||
904 | put_user_ex(restorer, &frame->pretcode); | ||
905 | } put_user_catch(err); | ||
906 | |||
907 | if (err) | ||
908 | return -EFAULT; | ||
909 | |||
910 | /* Set up registers for signal handler */ | ||
911 | regs->sp = (unsigned long) frame; | ||
912 | regs->ip = (unsigned long) ka->sa.sa_handler; | ||
913 | |||
914 | /* We use the x32 calling convention here... */ | ||
915 | regs->di = sig; | ||
916 | regs->si = (unsigned long) &frame->info; | ||
917 | regs->dx = (unsigned long) &frame->uc; | ||
918 | |||
919 | loadsegment(ds, __USER_DS); | ||
920 | loadsegment(es, __USER_DS); | ||
921 | |||
922 | regs->cs = __USER_CS; | ||
923 | regs->ss = __USER_DS; | ||
924 | |||
925 | return 0; | ||
926 | } | ||
927 | |||
928 | asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs) | ||
929 | { | ||
930 | struct rt_sigframe_x32 __user *frame; | ||
931 | sigset_t set; | ||
932 | unsigned long ax; | ||
933 | struct pt_regs tregs; | ||
934 | |||
935 | frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); | ||
936 | |||
937 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
938 | goto badframe; | ||
939 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
940 | goto badframe; | ||
941 | |||
942 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
943 | set_current_blocked(&set); | ||
944 | |||
945 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | ||
946 | goto badframe; | ||
947 | |||
948 | tregs = *regs; | ||
949 | if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT) | ||
950 | goto badframe; | ||
951 | |||
952 | return ax; | ||
953 | |||
954 | badframe: | ||
955 | signal_fault(regs, frame, "x32 rt_sigreturn"); | ||
956 | return 0; | ||
957 | } | ||
958 | #endif | ||
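A note on the regs->sp - 8 in sys32_x32_rt_sigreturn() above: by the time the restorer issues the sigreturn, the handler's ret has already popped the 8-byte pretcode slot into %rip, so the stack pointer sits 8 bytes past the start of the frame. The layout this assumes (a sketch of rt_sigframe_x32 from this series, for orientation rather than verbatim):

struct rt_sigframe_x32 {
	u64 pretcode;		/* restorer address; popped by the
				   handler's ret, hence the sp - 8 */
	struct ucontext_x32 uc;
	compat_siginfo_t info;
	/* fp state follows here */
};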
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 051489082d59..f921df8c2099 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -98,7 +98,7 @@ out: | |||
98 | static void find_start_end(unsigned long flags, unsigned long *begin, | 98 | static void find_start_end(unsigned long flags, unsigned long *begin, |
99 | unsigned long *end) | 99 | unsigned long *end) |
100 | { | 100 | { |
101 | if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { | 101 | if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) { |
102 | unsigned long new_begin; | 102 | unsigned long new_begin; |
103 | /* This is usually needed to map code in small | 103 | /* This is usually needed to map code in small
104 | model, so it needs to be in the first 31bit. Limit | 104 | model, so it needs to be in the first 31bit. Limit |
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
144 | (!vma || addr + len <= vma->vm_start)) | 144 | (!vma || addr + len <= vma->vm_start)) |
145 | return addr; | 145 | return addr; |
146 | } | 146 | } |
147 | if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) | 147 | if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32)) |
148 | && len <= mm->cached_hole_size) { | 148 | && len <= mm->cached_hole_size) { |
149 | mm->cached_hole_size = 0; | 149 | mm->cached_hole_size = 0; |
150 | mm->free_area_cache = begin; | 150 | mm->free_area_cache = begin; |
@@ -205,7 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
205 | return addr; | 205 | return addr; |
206 | 206 | ||
207 | /* for MAP_32BIT mappings we force the legacy mmap base */ | 207 | /* for MAP_32BIT mappings we force the legacy mmap base */
208 | if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) | 208 | if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) |
209 | goto bottomup; | 209 | goto bottomup; |
210 | 210 | ||
211 | /* requesting a specific address */ | 211 | /* requesting a specific address */ |
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c index 7ac7943be02c..5c7f8c20da74 100644 --- a/arch/x86/kernel/syscall_64.c +++ b/arch/x86/kernel/syscall_64.c | |||
@@ -5,6 +5,14 @@ | |||
5 | #include <linux/cache.h> | 5 | #include <linux/cache.h> |
6 | #include <asm/asm-offsets.h> | 6 | #include <asm/asm-offsets.h> |
7 | 7 | ||
8 | #define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat) | ||
9 | |||
10 | #ifdef CONFIG_X86_X32_ABI | ||
11 | # define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat) | ||
12 | #else | ||
13 | # define __SYSCALL_X32(nr, sym, compat) /* nothing */ | ||
14 | #endif | ||
15 | |||
8 | #define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; | 16 | #define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; |
9 | #include <asm/syscalls_64.h> | 17 | #include <asm/syscalls_64.h> |
10 | #undef __SYSCALL_64 | 18 | #undef __SYSCALL_64 |
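The trick here: syscalls_64.h is generated as a flat list of __SYSCALL_64/__SYSCALL_COMMON/__SYSCALL_X32 invocations, and the #defines above decide what each class expands to before the header is included. A worked example with hypothetical entries (the real ones come from syscall_64.tbl below):

/* generated syscalls_64.h, illustrative: */
__SYSCALL_COMMON(0, sys_read, sys_read)
__SYSCALL_64(13, sys_rt_sigaction, sys_rt_sigaction)
__SYSCALL_X32(512, sys32_rt_sigaction, sys32_rt_sigaction)

COMMON and, when CONFIG_X86_X32_ABI is set, X32 entries are funneled into __SYSCALL_64, which this file redefines to emit extern declarations here and, further down (not shown), to fill sys_call_table; with x32 disabled, __SYSCALL_X32 lines expand to nothing and the x32-only slots simply vanish.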
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 482ec3af2067..c6d17ad59b8a 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -119,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, | |||
119 | * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. | 119 | * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. |
120 | * On nmi (interrupt 2), do_trap should not be called. | 120 | * On nmi (interrupt 2), do_trap should not be called. |
121 | */ | 121 | */ |
122 | if (trapnr < 6) | 122 | if (trapnr < X86_TRAP_UD) |
123 | goto vm86_trap; | 123 | goto vm86_trap; |
124 | goto trap_signal; | 124 | goto trap_signal; |
125 | } | 125 | } |
@@ -132,7 +132,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, | |||
132 | trap_signal: | 132 | trap_signal: |
133 | #endif | 133 | #endif |
134 | /* | 134 | /* |
135 | * We want error_code and trap_no set for userspace faults and | 135 | * We want error_code and trap_nr set for userspace faults and |
136 | * kernelspace faults which result in die(), but not | 136 | * kernelspace faults which result in die(), but not |
137 | * kernelspace faults which are fixed up. die() gives the | 137 | * kernelspace faults which are fixed up. die() gives the |
138 | * process no chance to handle the signal and notice the | 138 | * process no chance to handle the signal and notice the |
@@ -141,7 +141,7 @@ trap_signal: | |||
141 | * delivered, faults. See also do_general_protection below. | 141 | * delivered, faults. See also do_general_protection below. |
142 | */ | 142 | */ |
143 | tsk->thread.error_code = error_code; | 143 | tsk->thread.error_code = error_code; |
144 | tsk->thread.trap_no = trapnr; | 144 | tsk->thread.trap_nr = trapnr; |
145 | 145 | ||
146 | #ifdef CONFIG_X86_64 | 146 | #ifdef CONFIG_X86_64 |
147 | if (show_unhandled_signals && unhandled_signal(tsk, signr) && | 147 | if (show_unhandled_signals && unhandled_signal(tsk, signr) && |
@@ -164,7 +164,7 @@ trap_signal: | |||
164 | kernel_trap: | 164 | kernel_trap: |
165 | if (!fixup_exception(regs)) { | 165 | if (!fixup_exception(regs)) { |
166 | tsk->thread.error_code = error_code; | 166 | tsk->thread.error_code = error_code; |
167 | tsk->thread.trap_no = trapnr; | 167 | tsk->thread.trap_nr = trapnr; |
168 | die(str, regs, error_code); | 168 | die(str, regs, error_code); |
169 | } | 169 | } |
170 | return; | 170 | return; |
@@ -203,27 +203,31 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ | |||
203 | do_trap(trapnr, signr, str, regs, error_code, &info); \ | 203 | do_trap(trapnr, signr, str, regs, error_code, &info); \ |
204 | } | 204 | } |
205 | 205 | ||
206 | DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) | 206 | DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, |
207 | DO_ERROR(4, SIGSEGV, "overflow", overflow) | 207 | regs->ip) |
208 | DO_ERROR(5, SIGSEGV, "bounds", bounds) | 208 | DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) |
209 | DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) | 209 | DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) |
210 | DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) | 210 | DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, |
211 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) | 211 | regs->ip) |
212 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) | 212 | DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", |
213 | coprocessor_segment_overrun) | ||
214 | DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) | ||
215 | DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) | ||
213 | #ifdef CONFIG_X86_32 | 216 | #ifdef CONFIG_X86_32 |
214 | DO_ERROR(12, SIGBUS, "stack segment", stack_segment) | 217 | DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) |
215 | #endif | 218 | #endif |
216 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) | 219 | DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, |
220 | BUS_ADRALN, 0) | ||
217 | 221 | ||
218 | #ifdef CONFIG_X86_64 | 222 | #ifdef CONFIG_X86_64 |
219 | /* Runs on IST stack */ | 223 | /* Runs on IST stack */ |
220 | dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) | 224 | dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) |
221 | { | 225 | { |
222 | if (notify_die(DIE_TRAP, "stack segment", regs, error_code, | 226 | if (notify_die(DIE_TRAP, "stack segment", regs, error_code, |
223 | 12, SIGBUS) == NOTIFY_STOP) | 227 | X86_TRAP_SS, SIGBUS) == NOTIFY_STOP) |
224 | return; | 228 | return; |
225 | preempt_conditional_sti(regs); | 229 | preempt_conditional_sti(regs); |
226 | do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL); | 230 | do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); |
227 | preempt_conditional_cli(regs); | 231 | preempt_conditional_cli(regs); |
228 | } | 232 | } |
229 | 233 | ||
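For reference while reading these conversions, the X86_TRAP_* names come from the enum added to asm/traps.h earlier in this series; the values are simply the hardware vector numbers the old magic constants spelled out:

enum {
	X86_TRAP_DE = 0,	/*  0: divide error */
	X86_TRAP_DB,		/*  1: debug */
	X86_TRAP_NMI,		/*  2: non-maskable interrupt */
	X86_TRAP_BP,		/*  3: int3 breakpoint */
	X86_TRAP_OF,		/*  4: overflow */
	X86_TRAP_BR,		/*  5: bound range exceeded */
	X86_TRAP_UD,		/*  6: invalid opcode */
	X86_TRAP_NM,		/*  7: device not available */
	X86_TRAP_DF,		/*  8: double fault */
	X86_TRAP_OLD_MF,	/*  9: coprocessor segment overrun */
	X86_TRAP_TS,		/* 10: invalid TSS */
	X86_TRAP_NP,		/* 11: segment not present */
	X86_TRAP_SS,		/* 12: stack-segment fault */
	X86_TRAP_GP,		/* 13: general protection fault */
	X86_TRAP_PF,		/* 14: page fault */
	X86_TRAP_SPURIOUS,	/* 15: spurious interrupt */
	X86_TRAP_MF,		/* 16: x87 FPU error */
	X86_TRAP_AC,		/* 17: alignment check */
	X86_TRAP_MC,		/* 18: machine check */
	X86_TRAP_XF,		/* 19: SIMD exception */
	X86_TRAP_IRET = 32,	/* 32: iret exception */
};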
@@ -233,10 +237,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) | |||
233 | struct task_struct *tsk = current; | 237 | struct task_struct *tsk = current; |
234 | 238 | ||
235 | /* Return not checked because double fault cannot be ignored */ | 239 | /* Return not checked because double fault cannot be ignored */
236 | notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV); | 240 | notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); |
237 | 241 | ||
238 | tsk->thread.error_code = error_code; | 242 | tsk->thread.error_code = error_code; |
239 | tsk->thread.trap_no = 8; | 243 | tsk->thread.trap_nr = X86_TRAP_DF; |
240 | 244 | ||
241 | /* | 245 | /* |
242 | * This is always a kernel trap and never fixable (and thus must | 246 | * This is always a kernel trap and never fixable (and thus must |
@@ -264,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code) | |||
264 | goto gp_in_kernel; | 268 | goto gp_in_kernel; |
265 | 269 | ||
266 | tsk->thread.error_code = error_code; | 270 | tsk->thread.error_code = error_code; |
267 | tsk->thread.trap_no = 13; | 271 | tsk->thread.trap_nr = X86_TRAP_GP; |
268 | 272 | ||
269 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && | 273 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && |
270 | printk_ratelimit()) { | 274 | printk_ratelimit()) { |
@@ -291,9 +295,9 @@ gp_in_kernel: | |||
291 | return; | 295 | return; |
292 | 296 | ||
293 | tsk->thread.error_code = error_code; | 297 | tsk->thread.error_code = error_code; |
294 | tsk->thread.trap_no = 13; | 298 | tsk->thread.trap_nr = X86_TRAP_GP; |
295 | if (notify_die(DIE_GPF, "general protection fault", regs, | 299 | if (notify_die(DIE_GPF, "general protection fault", regs, error_code, |
296 | error_code, 13, SIGSEGV) == NOTIFY_STOP) | 300 | X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP) |
297 | return; | 301 | return; |
298 | die("general protection fault", regs, error_code); | 302 | die("general protection fault", regs, error_code); |
299 | } | 303 | } |
@@ -302,13 +306,13 @@ gp_in_kernel: | |||
302 | dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) | 306 | dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) |
303 | { | 307 | { |
304 | #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP | 308 | #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP |
305 | if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) | 309 | if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, |
306 | == NOTIFY_STOP) | 310 | SIGTRAP) == NOTIFY_STOP) |
307 | return; | 311 | return; |
308 | #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ | 312 | #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ |
309 | 313 | ||
310 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) | 314 | if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, |
311 | == NOTIFY_STOP) | 315 | SIGTRAP) == NOTIFY_STOP) |
312 | return; | 316 | return; |
313 | 317 | ||
314 | /* | 318 | /* |
@@ -317,7 +321,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) | |||
317 | */ | 321 | */ |
318 | debug_stack_usage_inc(); | 322 | debug_stack_usage_inc(); |
319 | preempt_conditional_sti(regs); | 323 | preempt_conditional_sti(regs); |
320 | do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); | 324 | do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); |
321 | preempt_conditional_cli(regs); | 325 | preempt_conditional_cli(regs); |
322 | debug_stack_usage_dec(); | 326 | debug_stack_usage_dec(); |
323 | } | 327 | } |
@@ -422,8 +426,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) | |||
422 | preempt_conditional_sti(regs); | 426 | preempt_conditional_sti(regs); |
423 | 427 | ||
424 | if (regs->flags & X86_VM_MASK) { | 428 | if (regs->flags & X86_VM_MASK) { |
425 | handle_vm86_trap((struct kernel_vm86_regs *) regs, | 429 | handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, |
426 | error_code, 1); | 430 | X86_TRAP_DB); |
427 | preempt_conditional_cli(regs); | 431 | preempt_conditional_cli(regs); |
428 | debug_stack_usage_dec(); | 432 | debug_stack_usage_dec(); |
429 | return; | 433 | return; |
@@ -460,7 +464,8 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr) | |||
460 | struct task_struct *task = current; | 464 | struct task_struct *task = current; |
461 | siginfo_t info; | 465 | siginfo_t info; |
462 | unsigned short err; | 466 | unsigned short err; |
463 | char *str = (trapnr == 16) ? "fpu exception" : "simd exception"; | 467 | char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : |
468 | "simd exception"; | ||
464 | 469 | ||
465 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) | 470 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) |
466 | return; | 471 | return; |
@@ -470,7 +475,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr) | |||
470 | { | 475 | { |
471 | if (!fixup_exception(regs)) { | 476 | if (!fixup_exception(regs)) { |
472 | task->thread.error_code = error_code; | 477 | task->thread.error_code = error_code; |
473 | task->thread.trap_no = trapnr; | 478 | task->thread.trap_nr = trapnr; |
474 | die(str, regs, error_code); | 479 | die(str, regs, error_code); |
475 | } | 480 | } |
476 | return; | 481 | return; |
@@ -480,12 +485,12 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr) | |||
480 | * Save the info for the exception handler and clear the error. | 485 | * Save the info for the exception handler and clear the error. |
481 | */ | 486 | */ |
482 | save_init_fpu(task); | 487 | save_init_fpu(task); |
483 | task->thread.trap_no = trapnr; | 488 | task->thread.trap_nr = trapnr; |
484 | task->thread.error_code = error_code; | 489 | task->thread.error_code = error_code; |
485 | info.si_signo = SIGFPE; | 490 | info.si_signo = SIGFPE; |
486 | info.si_errno = 0; | 491 | info.si_errno = 0; |
487 | info.si_addr = (void __user *)regs->ip; | 492 | info.si_addr = (void __user *)regs->ip; |
488 | if (trapnr == 16) { | 493 | if (trapnr == X86_TRAP_MF) { |
489 | unsigned short cwd, swd; | 494 | unsigned short cwd, swd; |
490 | /* | 495 | /* |
491 | * (~cwd & swd) will mask out exceptions that are not set to unmasked | 496 | * (~cwd & swd) will mask out exceptions that are not set to unmasked |
@@ -529,10 +534,11 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr) | |||
529 | info.si_code = FPE_FLTRES; | 534 | info.si_code = FPE_FLTRES; |
530 | } else { | 535 | } else { |
531 | /* | 536 | /* |
532 | * If we're using IRQ 13, or supposedly even some trap 16 | 537 | * If we're using IRQ 13, or supposedly even some trap |
533 | * implementations, it's possible we get a spurious trap... | 538 | * X86_TRAP_MF implementations, it's possible |
539 | * we get a spurious trap, which is not an error. | ||
534 | */ | 540 | */ |
535 | return; /* Spurious trap, no error */ | 541 | return; |
536 | } | 542 | } |
537 | force_sig_info(SIGFPE, &info, task); | 543 | force_sig_info(SIGFPE, &info, task); |
538 | } | 544 | } |
@@ -543,13 +549,13 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) | |||
543 | ignore_fpu_irq = 1; | 549 | ignore_fpu_irq = 1; |
544 | #endif | 550 | #endif |
545 | 551 | ||
546 | math_error(regs, error_code, 16); | 552 | math_error(regs, error_code, X86_TRAP_MF); |
547 | } | 553 | } |
548 | 554 | ||
549 | dotraplinkage void | 555 | dotraplinkage void |
550 | do_simd_coprocessor_error(struct pt_regs *regs, long error_code) | 556 | do_simd_coprocessor_error(struct pt_regs *regs, long error_code) |
551 | { | 557 | { |
552 | math_error(regs, error_code, 19); | 558 | math_error(regs, error_code, X86_TRAP_XF); |
553 | } | 559 | } |
554 | 560 | ||
555 | dotraplinkage void | 561 | dotraplinkage void |
@@ -571,41 +577,18 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) | |||
571 | } | 577 | } |
572 | 578 | ||
573 | /* | 579 | /* |
574 | * __math_state_restore assumes that cr0.TS is already clear and the | ||
575 | * fpu state is all ready for use. Used during context switch. | ||
576 | */ | ||
577 | void __math_state_restore(void) | ||
578 | { | ||
579 | struct thread_info *thread = current_thread_info(); | ||
580 | struct task_struct *tsk = thread->task; | ||
581 | |||
582 | /* | ||
583 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | ||
584 | */ | ||
585 | if (unlikely(restore_fpu_checking(tsk))) { | ||
586 | stts(); | ||
587 | force_sig(SIGSEGV, tsk); | ||
588 | return; | ||
589 | } | ||
590 | |||
591 | thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ | ||
592 | tsk->fpu_counter++; | ||
593 | } | ||
594 | |||
595 | /* | ||
596 | * 'math_state_restore()' saves the current math information in the | 580 | * 'math_state_restore()' saves the current math information in the |
597 | * old math state array, and gets the new ones from the current task | 581 | * old math state array, and gets the new ones from the current task |
598 | * | 582 | * |
599 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. | 583 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. |
600 | * Don't touch unless you *really* know how it works. | 584 | * Don't touch unless you *really* know how it works. |
601 | * | 585 | * |
602 | * Must be called with kernel preemption disabled (in this case, | 586 | * Must be called with kernel preemption disabled (e.g. with
603 | * local interrupts are disabled at the call-site in entry.S). | 587 | * local interrupts disabled, as in the case of do_device_not_available).
604 | */ | 588 | */ |
605 | asmlinkage void math_state_restore(void) | 589 | void math_state_restore(void) |
606 | { | 590 | { |
607 | struct thread_info *thread = current_thread_info(); | 591 | struct task_struct *tsk = current; |
608 | struct task_struct *tsk = thread->task; | ||
609 | 592 | ||
610 | if (!tsk_used_math(tsk)) { | 593 | if (!tsk_used_math(tsk)) { |
611 | local_irq_enable(); | 594 | local_irq_enable(); |
@@ -622,9 +605,17 @@ asmlinkage void math_state_restore(void) | |||
622 | local_irq_disable(); | 605 | local_irq_disable(); |
623 | } | 606 | } |
624 | 607 | ||
625 | clts(); /* Allow maths ops (or we recurse) */ | 608 | __thread_fpu_begin(tsk); |
609 | /* | ||
610 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | ||
611 | */ | ||
612 | if (unlikely(restore_fpu_checking(tsk))) { | ||
613 | __thread_fpu_end(tsk); | ||
614 | force_sig(SIGSEGV, tsk); | ||
615 | return; | ||
616 | } | ||
626 | 617 | ||
627 | __math_state_restore(); | 618 | tsk->fpu_counter++; |
628 | } | 619 | } |
629 | EXPORT_SYMBOL_GPL(math_state_restore); | 620 | EXPORT_SYMBOL_GPL(math_state_restore); |
630 | 621 | ||
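Putting the two hunks together, math_state_restore() now reads roughly as follows (init_fpu() and its out-of-memory handling sit in the elided middle and are unchanged):

void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/* init_fpu(tsk) happens here; it may sleep, and on
		   failure the task is killed (elided above) */
		local_irq_disable();
	}

	__thread_fpu_begin(tsk);
	/* Paranoid restore: SIGSEGV if the state won't restore. */
	if (unlikely(restore_fpu_checking(tsk))) {
		__thread_fpu_end(tsk);
		force_sig(SIGSEGV, tsk);
		return;
	}

	tsk->fpu_counter++;
}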
@@ -658,20 +649,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) | |||
658 | info.si_errno = 0; | 649 | info.si_errno = 0; |
659 | info.si_code = ILL_BADSTK; | 650 | info.si_code = ILL_BADSTK; |
660 | info.si_addr = NULL; | 651 | info.si_addr = NULL; |
661 | if (notify_die(DIE_TRAP, "iret exception", | 652 | if (notify_die(DIE_TRAP, "iret exception", regs, error_code, |
662 | regs, error_code, 32, SIGILL) == NOTIFY_STOP) | 653 | X86_TRAP_IRET, SIGILL) == NOTIFY_STOP) |
663 | return; | 654 | return; |
664 | do_trap(32, SIGILL, "iret exception", regs, error_code, &info); | 655 | do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, |
656 | &info); | ||
665 | } | 657 | } |
666 | #endif | 658 | #endif |
667 | 659 | ||
668 | /* Set of traps needed for early debugging. */ | 660 | /* Set of traps needed for early debugging. */ |
669 | void __init early_trap_init(void) | 661 | void __init early_trap_init(void) |
670 | { | 662 | { |
671 | set_intr_gate_ist(1, &debug, DEBUG_STACK); | 663 | set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); |
672 | /* int3 can be called from all */ | 664 | /* int3 can be called from all */ |
673 | set_system_intr_gate_ist(3, &int3, DEBUG_STACK); | 665 | set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); |
674 | set_intr_gate(14, &page_fault); | 666 | set_intr_gate(X86_TRAP_PF, &page_fault); |
675 | load_idt(&idt_descr); | 667 | load_idt(&idt_descr); |
676 | } | 668 | } |
677 | 669 | ||
@@ -687,30 +679,30 @@ void __init trap_init(void) | |||
687 | early_iounmap(p, 4); | 679 | early_iounmap(p, 4); |
688 | #endif | 680 | #endif |
689 | 681 | ||
690 | set_intr_gate(0, &divide_error); | 682 | set_intr_gate(X86_TRAP_DE, &divide_error);
691 | set_intr_gate_ist(2, &nmi, NMI_STACK); | 683 | set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); |
692 | /* int4 can be called from all */ | 684 | /* int4 can be called from all */ |
693 | set_system_intr_gate(4, &overflow); | 685 | set_system_intr_gate(X86_TRAP_OF, &overflow); |
694 | set_intr_gate(5, &bounds); | 686 | set_intr_gate(X86_TRAP_BR, &bounds); |
695 | set_intr_gate(6, &invalid_op); | 687 | set_intr_gate(X86_TRAP_UD, &invalid_op); |
696 | set_intr_gate(7, &device_not_available); | 688 | set_intr_gate(X86_TRAP_NM, &device_not_available); |
697 | #ifdef CONFIG_X86_32 | 689 | #ifdef CONFIG_X86_32 |
698 | set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); | 690 | set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); |
699 | #else | 691 | #else |
700 | set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); | 692 | set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); |
701 | #endif | 693 | #endif |
702 | set_intr_gate(9, &coprocessor_segment_overrun); | 694 | set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun); |
703 | set_intr_gate(10, &invalid_TSS); | 695 | set_intr_gate(X86_TRAP_TS, &invalid_TSS); |
704 | set_intr_gate(11, &segment_not_present); | 696 | set_intr_gate(X86_TRAP_NP, &segment_not_present); |
705 | set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); | 697 | set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); |
706 | set_intr_gate(13, &general_protection); | 698 | set_intr_gate(X86_TRAP_GP, &general_protection); |
707 | set_intr_gate(15, &spurious_interrupt_bug); | 699 | set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug); |
708 | set_intr_gate(16, &coprocessor_error); | 700 | set_intr_gate(X86_TRAP_MF, &coprocessor_error); |
709 | set_intr_gate(17, &alignment_check); | 701 | set_intr_gate(X86_TRAP_AC, &alignment_check); |
710 | #ifdef CONFIG_X86_MCE | 702 | #ifdef CONFIG_X86_MCE |
711 | set_intr_gate_ist(18, &machine_check, MCE_STACK); | 703 | set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); |
712 | #endif | 704 | #endif |
713 | set_intr_gate(19, &simd_coprocessor_error); | 705 | set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error); |
714 | 706 | ||
715 | /* Reserve all the builtin and the syscall vector: */ | 707 | /* Reserve all the builtin and the syscall vector: */ |
716 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) | 708 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) |
@@ -735,7 +727,7 @@ void __init trap_init(void) | |||
735 | 727 | ||
736 | #ifdef CONFIG_X86_64 | 728 | #ifdef CONFIG_X86_64 |
737 | memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); | 729 | memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); |
738 | set_nmi_gate(1, &debug); | 730 | set_nmi_gate(X86_TRAP_DB, &debug); |
739 | set_nmi_gate(3, &int3); | 731 | set_nmi_gate(X86_TRAP_BP, &int3); |
740 | #endif | 732 | #endif |
741 | } | 733 | } |
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index b466cab5ba15..a1315ab2d6b9 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c | |||
@@ -567,7 +567,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) | |||
567 | } | 567 | } |
568 | if (trapno != 1) | 568 | if (trapno != 1) |
569 | return 1; /* we let this be handled by the calling routine */ | 569 | return 1; /* we let this be handled by the calling routine */
570 | current->thread.trap_no = trapno; | 570 | current->thread.trap_nr = trapno; |
571 | current->thread.error_code = error_code; | 571 | current->thread.error_code = error_code; |
572 | force_sig(SIGTRAP, current); | 572 | force_sig(SIGTRAP, current); |
573 | return 0; | 573 | return 0; |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index b07ba9393564..327509b95e0e 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -153,7 +153,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size) | |||
153 | 153 | ||
154 | thread->error_code = 6; /* user fault, no page, write */ | 154 | thread->error_code = 6; /* user fault, no page, write */ |
155 | thread->cr2 = ptr; | 155 | thread->cr2 = ptr; |
156 | thread->trap_no = 14; | 156 | thread->trap_nr = X86_TRAP_PF; |
157 | 157 | ||
158 | memset(&info, 0, sizeof(info)); | 158 | memset(&info, 0, sizeof(info)); |
159 | info.si_signo = SIGSEGV; | 159 | info.si_signo = SIGSEGV; |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index a3911343976b..711091114119 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk) | |||
47 | if (!fx) | 47 | if (!fx) |
48 | return; | 48 | return; |
49 | 49 | ||
50 | BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU); | 50 | BUG_ON(__thread_has_fpu(tsk)); |
51 | 51 | ||
52 | xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv; | 52 | xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv; |
53 | 53 | ||
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf) | |||
168 | if (!used_math()) | 168 | if (!used_math()) |
169 | return 0; | 169 | return 0; |
170 | 170 | ||
171 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | 171 | if (user_has_fpu()) { |
172 | if (use_xsave()) | 172 | if (use_xsave()) |
173 | err = xsave_user(buf); | 173 | err = xsave_user(buf); |
174 | else | 174 | else |
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf) | |||
176 | 176 | ||
177 | if (err) | 177 | if (err) |
178 | return err; | 178 | return err; |
179 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 179 | user_fpu_end(); |
180 | stts(); | ||
181 | } else { | 180 | } else { |
182 | sanitize_i387_state(tsk); | 181 | sanitize_i387_state(tsk); |
183 | if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, | 182 | if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, |
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf) | |||
292 | return err; | 291 | return err; |
293 | } | 292 | } |
294 | 293 | ||
295 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | 294 | user_fpu_begin(); |
296 | clts(); | ||
297 | task_thread_info(current)->status |= TS_USEDFPU; | ||
298 | } | ||
299 | if (use_xsave()) | 295 | if (use_xsave()) |
300 | err = restore_user_xstate(buf); | 296 | err = restore_user_xstate(buf); |
301 | else | 297 | else |
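The helpers this file switches to wrap exactly the open-coded sequences being deleted. Roughly, as a sketch of the reworked asm/i387.h (helper names per this series; bodies inferred from the code removed above, not verbatim):

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	clts();			/* allow FPU ops without trapping */
	/* ...mark tsk as owning the FPU (was: status |= TS_USEDFPU) */
}

static inline void __thread_fpu_end(struct task_struct *tsk)
{
	/* ...clear the ownership mark (was: status &= ~TS_USEDFPU) */
	stts();			/* trap on next FPU use */
}

user_fpu_begin()/user_fpu_end() are the preemption-safe wrappers used in save/restore_i387_xstate(), and __thread_has_fpu(tsk) replaces the old open-coded TS_USEDFPU test.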
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 05a562b85025..0982507b962a 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -1891,6 +1891,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, | |||
1891 | ss->p = 1; | 1891 | ss->p = 1; |
1892 | } | 1892 | } |
1893 | 1893 | ||
1894 | static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) | ||
1895 | { | ||
1896 | struct x86_emulate_ops *ops = ctxt->ops; | ||
1897 | u32 eax, ebx, ecx, edx; | ||
1898 | |||
1899 | /* | ||
1900 | * syscall should always be enabled in longmode - so the check only |||
1901 | * becomes vendor specific (via cpuid) when other modes are active... |||
1902 | */ | ||
1903 | if (ctxt->mode == X86EMUL_MODE_PROT64) | ||
1904 | return true; | ||
1905 | |||
1906 | eax = 0x00000000; | ||
1907 | ecx = 0x00000000; | ||
1908 | if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) { | ||
1909 | /* | ||
1910 | * Intel ("GenuineIntel") | ||
1911 | * remark: Intel CPUs only support "syscall" in 64bit | ||
1912 | * longmode. Also, a 64bit guest with a |||
1913 | * 32bit compat-app running will #UD! While this |||
1914 | * behaviour could be fixed (by emulating the AMD |||
1915 | * response), AMD CPUs can't behave like Intel. |||
1916 | */ | ||
1917 | if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && | ||
1918 | ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && | ||
1919 | edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) | ||
1920 | return false; | ||
1921 | |||
1922 | /* AMD ("AuthenticAMD") */ | ||
1923 | if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && | ||
1924 | ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && | ||
1925 | edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) | ||
1926 | return true; | ||
1927 | |||
1928 | /* AMD ("AMDisbetter!") */ | ||
1929 | if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && | ||
1930 | ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && | ||
1931 | edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) | ||
1932 | return true; | ||
1933 | } | ||
1934 | |||
1935 | /* default: (not Intel, not AMD), apply Intel's stricter rules... */ | ||
1936 | return false; | ||
1937 | } | ||
1938 | |||
1894 | static int em_syscall(struct x86_emulate_ctxt *ctxt) | 1939 | static int em_syscall(struct x86_emulate_ctxt *ctxt) |
1895 | { | 1940 | { |
1896 | struct x86_emulate_ops *ops = ctxt->ops; | 1941 | struct x86_emulate_ops *ops = ctxt->ops; |
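The X86EMUL_CPUID_VENDOR_* constants compared above are just the CPUID leaf 0 vendor string packed into registers. For "GenuineIntel" the values (as defined in asm/kvm_emulate.h) are:

#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547	/* "Genu" */
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69	/* "ineI" */
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e	/* "ntel" */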
@@ -1904,9 +1949,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) | |||
1904 | ctxt->mode == X86EMUL_MODE_VM86) | 1949 | ctxt->mode == X86EMUL_MODE_VM86) |
1905 | return emulate_ud(ctxt); | 1950 | return emulate_ud(ctxt); |
1906 | 1951 | ||
1952 | if (!(em_syscall_is_enabled(ctxt))) | ||
1953 | return emulate_ud(ctxt); | ||
1954 | |||
1907 | ops->get_msr(ctxt, MSR_EFER, &efer); | 1955 | ops->get_msr(ctxt, MSR_EFER, &efer); |
1908 | setup_syscalls_segments(ctxt, &cs, &ss); | 1956 | setup_syscalls_segments(ctxt, &cs, &ss); |
1909 | 1957 | ||
1958 | if (!(efer & EFER_SCE)) | ||
1959 | return emulate_ud(ctxt); | ||
1960 | |||
1910 | ops->get_msr(ctxt, MSR_STAR, &msr_data); | 1961 | ops->get_msr(ctxt, MSR_STAR, &msr_data); |
1911 | msr_data >>= 32; | 1962 | msr_data >>= 32; |
1912 | cs_sel = (u16)(msr_data & 0xfffc); | 1963 | cs_sel = (u16)(msr_data & 0xfffc); |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 5fa553babe56..e385214711cb 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/ftrace_event.h> | 29 | #include <linux/ftrace_event.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | 31 | ||
32 | #include <asm/perf_event.h> | ||
32 | #include <asm/tlbflush.h> | 33 | #include <asm/tlbflush.h> |
33 | #include <asm/desc.h> | 34 | #include <asm/desc.h> |
34 | #include <asm/kvm_para.h> | 35 | #include <asm/kvm_para.h> |
@@ -575,6 +576,8 @@ static void svm_hardware_disable(void *garbage) | |||
575 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); | 576 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); |
576 | 577 | ||
577 | cpu_svm_disable(); | 578 | cpu_svm_disable(); |
579 | |||
580 | amd_pmu_disable_virt(); | ||
578 | } | 581 | } |
579 | 582 | ||
580 | static int svm_hardware_enable(void *garbage) | 583 | static int svm_hardware_enable(void *garbage) |
@@ -622,6 +625,8 @@ static int svm_hardware_enable(void *garbage) | |||
622 | 625 | ||
623 | svm_init_erratum_383(); | 626 | svm_init_erratum_383(); |
624 | 627 | ||
628 | amd_pmu_enable_virt(); | ||
629 | |||
625 | return 0; | 630 | return 0; |
626 | } | 631 | } |
627 | 632 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d29216c462b3..3b4c8d8ad906 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
1457 | #ifdef CONFIG_X86_64 | 1457 | #ifdef CONFIG_X86_64 |
1458 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); | 1458 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
1459 | #endif | 1459 | #endif |
1460 | if (current_thread_info()->status & TS_USEDFPU) | 1460 | if (__thread_has_fpu(current)) |
1461 | clts(); | 1461 | clts(); |
1462 | load_gdt(&__get_cpu_var(host_gdt)); | 1462 | load_gdt(&__get_cpu_var(host_gdt)); |
1463 | } | 1463 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 14d6cadc4ba6..9cbfc0698118 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1495,6 +1495,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu) | |||
1495 | 1495 | ||
1496 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | 1496 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
1497 | { | 1497 | { |
1498 | bool pr = false; | ||
1499 | |||
1498 | switch (msr) { | 1500 | switch (msr) { |
1499 | case MSR_EFER: | 1501 | case MSR_EFER: |
1500 | return set_efer(vcpu, data); | 1502 | return set_efer(vcpu, data); |
@@ -1635,6 +1637,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
1635 | pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " | 1637 | pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " |
1636 | "0x%x data 0x%llx\n", msr, data); | 1638 | "0x%x data 0x%llx\n", msr, data); |
1637 | break; | 1639 | break; |
1640 | case MSR_P6_PERFCTR0: | ||
1641 | case MSR_P6_PERFCTR1: | ||
1642 | pr = true; | ||
1643 | case MSR_P6_EVNTSEL0: | ||
1644 | case MSR_P6_EVNTSEL1: | ||
1645 | if (kvm_pmu_msr(vcpu, msr)) | ||
1646 | return kvm_pmu_set_msr(vcpu, msr, data); | ||
1647 | |||
1648 | if (pr || data != 0) | ||
1649 | pr_unimpl(vcpu, "disabled perfctr wrmsr: " | ||
1650 | "0x%x data 0x%llx\n", msr, data); | ||
1651 | break; | ||
1638 | case MSR_K7_CLK_CTL: | 1652 | case MSR_K7_CLK_CTL: |
1639 | /* | 1653 | /* |
1640 | * Ignore all writes to this no longer documented MSR. | 1654 | * Ignore all writes to this no longer documented MSR. |
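Note the deliberate fallthrough in the new perfctr cases: writing a counter (PERFCTR*) sets pr so the "disabled perfctr" message fires even for data == 0, while a zero write to an event-select register stays silent. Condensed from the hunk above:

	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
		pr = true;	/* counter write: always worth a warning */
		/* fall through */
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
		if (kvm_pmu_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr, data);
		if (pr || data != 0)
			pr_unimpl(vcpu, "disabled perfctr wrmsr: "
				  "0x%x data 0x%llx\n", msr, data);
		break;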
@@ -1835,6 +1849,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | |||
1835 | case MSR_FAM10H_MMIO_CONF_BASE: | 1849 | case MSR_FAM10H_MMIO_CONF_BASE: |
1836 | data = 0; | 1850 | data = 0; |
1837 | break; | 1851 | break; |
1852 | case MSR_P6_PERFCTR0: | ||
1853 | case MSR_P6_PERFCTR1: | ||
1854 | case MSR_P6_EVNTSEL0: | ||
1855 | case MSR_P6_EVNTSEL1: | ||
1856 | if (kvm_pmu_msr(vcpu, msr)) | ||
1857 | return kvm_pmu_get_msr(vcpu, msr, pdata); | ||
1858 | data = 0; | ||
1859 | break; | ||
1838 | case MSR_IA32_UCODE_REV: | 1860 | case MSR_IA32_UCODE_REV: |
1839 | data = 0x100000000ULL; | 1861 | data = 0x100000000ULL; |
1840 | break; | 1862 | break; |
@@ -4180,6 +4202,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, | |||
4180 | return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); | 4202 | return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); |
4181 | } | 4203 | } |
4182 | 4204 | ||
4205 | static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, | ||
4206 | u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) | ||
4207 | { | ||
4208 | struct kvm_cpuid_entry2 *cpuid = NULL; | ||
4209 | |||
4210 | if (eax && ecx) | ||
4211 | cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt), | ||
4212 | *eax, *ecx); | ||
4213 | |||
4214 | if (cpuid) { | ||
4215 | *eax = cpuid->eax; | ||
4216 | *ecx = cpuid->ecx; | ||
4217 | if (ebx) | ||
4218 | *ebx = cpuid->ebx; | ||
4219 | if (edx) | ||
4220 | *edx = cpuid->edx; | ||
4221 | return true; | ||
4222 | } | ||
4223 | |||
4224 | return false; | ||
4225 | } | ||
4226 | |||
4183 | static struct x86_emulate_ops emulate_ops = { | 4227 | static struct x86_emulate_ops emulate_ops = { |
4184 | .read_std = kvm_read_guest_virt_system, | 4228 | .read_std = kvm_read_guest_virt_system, |
4185 | .write_std = kvm_write_guest_virt_system, | 4229 | .write_std = kvm_write_guest_virt_system, |
@@ -4211,6 +4255,7 @@ static struct x86_emulate_ops emulate_ops = { | |||
4211 | .get_fpu = emulator_get_fpu, | 4255 | .get_fpu = emulator_get_fpu, |
4212 | .put_fpu = emulator_put_fpu, | 4256 | .put_fpu = emulator_put_fpu, |
4213 | .intercept = emulator_intercept, | 4257 | .intercept = emulator_intercept, |
4258 | .get_cpuid = emulator_get_cpuid, | ||
4214 | }; | 4259 | }; |
4215 | 4260 | ||
4216 | static void cache_all_regs(struct kvm_vcpu *vcpu) | 4261 | static void cache_all_regs(struct kvm_vcpu *vcpu) |
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index 7718541541d4..9b868124128d 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/regset.h> | 28 | #include <linux/regset.h> |
29 | 29 | ||
30 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
31 | #include <asm/traps.h> | ||
31 | #include <asm/desc.h> | 32 | #include <asm/desc.h> |
32 | #include <asm/user.h> | 33 | #include <asm/user.h> |
33 | #include <asm/i387.h> | 34 | #include <asm/i387.h> |
@@ -269,7 +270,7 @@ void math_emulate(struct math_emu_info *info) | |||
269 | FPU_EIP = FPU_ORIG_EIP; /* Point to current FPU instruction. */ | 270 | FPU_EIP = FPU_ORIG_EIP; /* Point to current FPU instruction. */ |
270 | 271 | ||
271 | RE_ENTRANT_CHECK_OFF; | 272 | RE_ENTRANT_CHECK_OFF; |
272 | current->thread.trap_no = 16; | 273 | current->thread.trap_nr = X86_TRAP_MF; |
273 | current->thread.error_code = 0; | 274 | current->thread.error_code = 0; |
274 | send_sig(SIGFPE, current, 1); | 275 | send_sig(SIGFPE, current, 1); |
275 | return; | 276 | return; |
@@ -662,7 +663,7 @@ static int valid_prefix(u_char *Byte, u_char __user **fpu_eip, | |||
662 | void math_abort(struct math_emu_info *info, unsigned int signal) | 663 | void math_abort(struct math_emu_info *info, unsigned int signal) |
663 | { | 664 | { |
664 | FPU_EIP = FPU_ORIG_EIP; | 665 | FPU_EIP = FPU_ORIG_EIP; |
665 | current->thread.trap_no = 16; | 666 | current->thread.trap_nr = X86_TRAP_MF; |
666 | current->thread.error_code = 0; | 667 | current->thread.error_code = 0; |
667 | send_sig(signal, current, 1); | 668 | send_sig(signal, current, 1); |
668 | RE_ENTRANT_CHECK_OFF; | 669 | RE_ENTRANT_CHECK_OFF; |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index f0b4caf85c1a..3ecfd1aaf214 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -615,7 +615,7 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code, | |||
615 | dump_pagetable(address); | 615 | dump_pagetable(address); |
616 | 616 | ||
617 | tsk->thread.cr2 = address; | 617 | tsk->thread.cr2 = address; |
618 | tsk->thread.trap_no = 14; | 618 | tsk->thread.trap_nr = X86_TRAP_PF; |
619 | tsk->thread.error_code = error_code; | 619 | tsk->thread.error_code = error_code; |
620 | 620 | ||
621 | if (__die("Bad pagetable", regs, error_code)) | 621 | if (__die("Bad pagetable", regs, error_code)) |
@@ -636,7 +636,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
636 | /* Are we prepared to handle this kernel fault? */ | 636 | /* Are we prepared to handle this kernel fault? */ |
637 | if (fixup_exception(regs)) { | 637 | if (fixup_exception(regs)) { |
638 | if (current_thread_info()->sig_on_uaccess_error && signal) { | 638 | if (current_thread_info()->sig_on_uaccess_error && signal) { |
639 | tsk->thread.trap_no = 14; | 639 | tsk->thread.trap_nr = X86_TRAP_PF; |
640 | tsk->thread.error_code = error_code | PF_USER; | 640 | tsk->thread.error_code = error_code | PF_USER; |
641 | tsk->thread.cr2 = address; | 641 | tsk->thread.cr2 = address; |
642 | 642 | ||
@@ -676,7 +676,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
676 | printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); | 676 | printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); |
677 | 677 | ||
678 | tsk->thread.cr2 = address; | 678 | tsk->thread.cr2 = address; |
679 | tsk->thread.trap_no = 14; | 679 | tsk->thread.trap_nr = X86_TRAP_PF; |
680 | tsk->thread.error_code = error_code; | 680 | tsk->thread.error_code = error_code; |
681 | 681 | ||
682 | sig = SIGKILL; | 682 | sig = SIGKILL; |
@@ -754,7 +754,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |||
754 | /* Kernel addresses are always protection faults: */ | 754 | /* Kernel addresses are always protection faults: */ |
755 | tsk->thread.cr2 = address; | 755 | tsk->thread.cr2 = address; |
756 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); | 756 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); |
757 | tsk->thread.trap_no = 14; | 757 | tsk->thread.trap_nr = X86_TRAP_PF; |
758 | 758 | ||
759 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); | 759 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); |
760 | 760 | ||
@@ -838,7 +838,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, | |||
838 | 838 | ||
839 | tsk->thread.cr2 = address; | 839 | tsk->thread.cr2 = address; |
840 | tsk->thread.error_code = error_code; | 840 | tsk->thread.error_code = error_code; |
841 | tsk->thread.trap_no = 14; | 841 | tsk->thread.trap_nr = X86_TRAP_PF; |
842 | 842 | ||
843 | #ifdef CONFIG_MEMORY_FAILURE | 843 | #ifdef CONFIG_MEMORY_FAILURE |
844 | if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { | 844 | if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { |
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index bff89dfe3619..d6aa6e8315d1 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c | |||
@@ -67,7 +67,7 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth) | |||
67 | { | 67 | { |
68 | struct stack_frame_ia32 *head; | 68 | struct stack_frame_ia32 *head; |
69 | 69 | ||
70 | /* User process is 32-bit */ | 70 | /* User process is IA32 */ |
71 | if (!current || !test_thread_flag(TIF_IA32)) | 71 | if (!current || !test_thread_flag(TIF_IA32)) |
72 | return 0; | 72 | return 0; |
73 | 73 | ||
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 492ade8c978e..d99346ea8fdb 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -374,7 +374,7 @@ int __init pci_xen_init(void) | |||
374 | 374 | ||
375 | int __init pci_xen_hvm_init(void) | 375 | int __init pci_xen_hvm_init(void) |
376 | { | 376 | { |
377 | if (!xen_feature(XENFEAT_hvm_pirqs)) | 377 | if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs)) |
378 | return 0; | 378 | return 0; |
379 | 379 | ||
380 | #ifdef CONFIG_ACPI | 380 | #ifdef CONFIG_ACPI |
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile index 564b2476fede..3236aebc828d 100644 --- a/arch/x86/syscalls/Makefile +++ b/arch/x86/syscalls/Makefile | |||
@@ -10,8 +10,10 @@ syshdr := $(srctree)/$(src)/syscallhdr.sh | |||
10 | systbl := $(srctree)/$(src)/syscalltbl.sh | 10 | systbl := $(srctree)/$(src)/syscalltbl.sh |
11 | 11 | ||
12 | quiet_cmd_syshdr = SYSHDR $@ | 12 | quiet_cmd_syshdr = SYSHDR $@ |
13 | cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' $< $@ \ | 13 | cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ |
14 | $(syshdr_abi_$(basetarget)) $(syshdr_pfx_$(basetarget)) | 14 | '$(syshdr_abi_$(basetarget))' \ |
15 | '$(syshdr_pfx_$(basetarget))' \ | ||
16 | '$(syshdr_offset_$(basetarget))' | ||
15 | quiet_cmd_systbl = SYSTBL $@ | 17 | quiet_cmd_systbl = SYSTBL $@ |
16 | cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@ | 18 | cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@ |
17 | 19 | ||
@@ -24,18 +26,28 @@ syshdr_pfx_unistd_32_ia32 := ia32_ | |||
24 | $(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) | 26 | $(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) |
25 | $(call if_changed,syshdr) | 27 | $(call if_changed,syshdr) |
26 | 28 | ||
27 | syshdr_abi_unistd_64 := 64 | 29 | syshdr_abi_unistd_x32 := common,x32 |
30 | syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT | ||
31 | $(out)/unistd_x32.h: $(syscall64) $(syshdr) | ||
32 | $(call if_changed,syshdr) | ||
33 | |||
34 | syshdr_abi_unistd_64 := common,64 | ||
28 | $(out)/unistd_64.h: $(syscall64) $(syshdr) | 35 | $(out)/unistd_64.h: $(syscall64) $(syshdr) |
29 | $(call if_changed,syshdr) | 36 | $(call if_changed,syshdr) |
30 | 37 | ||
38 | syshdr_abi_unistd_64_x32 := x32 | ||
39 | syshdr_pfx_unistd_64_x32 := x32_ | ||
40 | $(out)/unistd_64_x32.h: $(syscall64) $(syshdr) | ||
41 | $(call if_changed,syshdr) | ||
42 | |||
31 | $(out)/syscalls_32.h: $(syscall32) $(systbl) | 43 | $(out)/syscalls_32.h: $(syscall32) $(systbl) |
32 | $(call if_changed,systbl) | 44 | $(call if_changed,systbl) |
33 | $(out)/syscalls_64.h: $(syscall64) $(systbl) | 45 | $(out)/syscalls_64.h: $(syscall64) $(systbl) |
34 | $(call if_changed,systbl) | 46 | $(call if_changed,systbl) |
35 | 47 | ||
36 | syshdr-y += unistd_32.h unistd_64.h | 48 | syshdr-y += unistd_32.h unistd_64.h unistd_x32.h |
37 | syshdr-y += syscalls_32.h | 49 | syshdr-y += syscalls_32.h |
38 | syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h | 50 | syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h |
39 | syshdr-$(CONFIG_X86_64) += syscalls_64.h | 51 | syshdr-$(CONFIG_X86_64) += syscalls_64.h |
40 | 52 | ||
41 | targets += $(syshdr-y) | 53 | targets += $(syshdr-y) |
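With the new offset argument, syscallhdr.sh biases every x32 syscall number by __X32_SYSCALL_BIT (bit 30 of the syscall number), so x32 numbers can never collide with native 64-bit ones. The generated unistd_x32.h then looks roughly like this (illustrative entries; exact formatting depends on the script):

#define __NR_read (__X32_SYSCALL_BIT + 0)
#define __NR_write (__X32_SYSCALL_BIT + 1)
/* ... */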
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index ce98e287c066..031cef84fe43 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl | |||
@@ -181,7 +181,7 @@ | |||
181 | 172 i386 prctl sys_prctl | 181 | 172 i386 prctl sys_prctl |
182 | 173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn | 182 | 173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn |
183 | 174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction | 183 | 174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction |
184 | 175 i386 rt_sigprocmask sys_rt_sigprocmask sys32_rt_sigprocmask | 184 | 175 i386 rt_sigprocmask sys_rt_sigprocmask |
185 | 176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending | 185 | 176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending |
186 | 177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait | 186 | 177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait |
187 | 178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo | 187 | 178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo |
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index b440a8f7eefa..dd29a9ea27c5 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl | |||
@@ -4,317 +4,350 @@ | |||
4 | # The format is: | 4 | # The format is: |
5 | # <number> <abi> <name> <entry point> | 5 | # <number> <abi> <name> <entry point> |
6 | # | 6 | # |
7 | # The abi is always "64" for this file (for now.) | 7 | # The abi is "common", "64" or "x32" for this file. |
8 | # | 8 | # |
9 | 0 64 read sys_read | 9 | 0 common read sys_read |
10 | 1 64 write sys_write | 10 | 1 common write sys_write |
11 | 2 64 open sys_open | 11 | 2 common open sys_open |
12 | 3 64 close sys_close | 12 | 3 common close sys_close |
13 | 4 64 stat sys_newstat | 13 | 4 common stat sys_newstat |
14 | 5 64 fstat sys_newfstat | 14 | 5 common fstat sys_newfstat |
15 | 6 64 lstat sys_newlstat | 15 | 6 common lstat sys_newlstat |
16 | 7 64 poll sys_poll | 16 | 7 common poll sys_poll |
17 | 8 64 lseek sys_lseek | 17 | 8 common lseek sys_lseek |
18 | 9 64 mmap sys_mmap | 18 | 9 common mmap sys_mmap |
19 | 10 64 mprotect sys_mprotect | 19 | 10 common mprotect sys_mprotect |
20 | 11 64 munmap sys_munmap | 20 | 11 common munmap sys_munmap |
21 | 12 64 brk sys_brk | 21 | 12 common brk sys_brk |
22 | 13 64 rt_sigaction sys_rt_sigaction | 22 | 13 64 rt_sigaction sys_rt_sigaction |
23 | 14 64 rt_sigprocmask sys_rt_sigprocmask | 23 | 14 common rt_sigprocmask sys_rt_sigprocmask |
24 | 15 64 rt_sigreturn stub_rt_sigreturn | 24 | 15 64 rt_sigreturn stub_rt_sigreturn |
25 | 16 64 ioctl sys_ioctl | 25 | 16 64 ioctl sys_ioctl |
26 | 17 64 pread64 sys_pread64 | 26 | 17 common pread64 sys_pread64 |
27 | 18 64 pwrite64 sys_pwrite64 | 27 | 18 common pwrite64 sys_pwrite64 |
28 | 19 64 readv sys_readv | 28 | 19 64 readv sys_readv |
29 | 20 64 writev sys_writev | 29 | 20 64 writev sys_writev |
30 | 21 64 access sys_access | 30 | 21 common access sys_access |
31 | 22 64 pipe sys_pipe | 31 | 22 common pipe sys_pipe |
32 | 23 64 select sys_select | 32 | 23 common select sys_select |
33 | 24 64 sched_yield sys_sched_yield | 33 | 24 common sched_yield sys_sched_yield |
34 | 25 64 mremap sys_mremap | 34 | 25 common mremap sys_mremap |
35 | 26 64 msync sys_msync | 35 | 26 common msync sys_msync |
36 | 27 64 mincore sys_mincore | 36 | 27 common mincore sys_mincore |
37 | 28 64 madvise sys_madvise | 37 | 28 common madvise sys_madvise |
38 | 29 64 shmget sys_shmget | 38 | 29 common shmget sys_shmget |
39 | 30 64 shmat sys_shmat | 39 | 30 common shmat sys_shmat |
40 | 31 64 shmctl sys_shmctl | 40 | 31 common shmctl sys_shmctl |
41 | 32 64 dup sys_dup | 41 | 32 common dup sys_dup |
42 | 33 64 dup2 sys_dup2 | 42 | 33 common dup2 sys_dup2 |
43 | 34 64 pause sys_pause | 43 | 34 common pause sys_pause |
44 | 35 64 nanosleep sys_nanosleep | 44 | 35 common nanosleep sys_nanosleep |
45 | 36 64 getitimer sys_getitimer | 45 | 36 common getitimer sys_getitimer |
46 | 37 64 alarm sys_alarm | 46 | 37 common alarm sys_alarm |
47 | 38 64 setitimer sys_setitimer | 47 | 38 common setitimer sys_setitimer |
48 | 39 64 getpid sys_getpid | 48 | 39 common getpid sys_getpid |
49 | 40 64 sendfile sys_sendfile64 | 49 | 40 common sendfile sys_sendfile64 |
50 | 41 64 socket sys_socket | 50 | 41 common socket sys_socket |
51 | 42 64 connect sys_connect | 51 | 42 common connect sys_connect |
52 | 43 64 accept sys_accept | 52 | 43 common accept sys_accept |
53 | 44 64 sendto sys_sendto | 53 | 44 common sendto sys_sendto |
54 | 45 64 recvfrom sys_recvfrom | 54 | 45 64 recvfrom sys_recvfrom |
55 | 46 64 sendmsg sys_sendmsg | 55 | 46 64 sendmsg sys_sendmsg |
56 | 47 64 recvmsg sys_recvmsg | 56 | 47 64 recvmsg sys_recvmsg |
57 | 48 64 shutdown sys_shutdown | 57 | 48 common shutdown sys_shutdown |
58 | 49 64 bind sys_bind | 58 | 49 common bind sys_bind |
59 | 50 64 listen sys_listen | 59 | 50 common listen sys_listen |
60 | 51 64 getsockname sys_getsockname | 60 | 51 common getsockname sys_getsockname |
61 | 52 64 getpeername sys_getpeername | 61 | 52 common getpeername sys_getpeername |
62 | 53 64 socketpair sys_socketpair | 62 | 53 common socketpair sys_socketpair |
63 | 54 64 setsockopt sys_setsockopt | 63 | 54 common setsockopt sys_setsockopt |
64 | 55 64 getsockopt sys_getsockopt | 64 | 55 common getsockopt sys_getsockopt |
65 | 56 64 clone stub_clone | 65 | 56 common clone stub_clone |
66 | 57 64 fork stub_fork | 66 | 57 common fork stub_fork |
67 | 58 64 vfork stub_vfork | 67 | 58 common vfork stub_vfork |
68 | 59 64 execve stub_execve | 68 | 59 64 execve stub_execve |
69 | 60 64 exit sys_exit | 69 | 60 common exit sys_exit |
70 | 61 64 wait4 sys_wait4 | 70 | 61 common wait4 sys_wait4 |
71 | 62 64 kill sys_kill | 71 | 62 common kill sys_kill |
72 | 63 64 uname sys_newuname | 72 | 63 common uname sys_newuname |
73 | 64 64 semget sys_semget | 73 | 64 common semget sys_semget |
74 | 65 64 semop sys_semop | 74 | 65 common semop sys_semop |
75 | 66 64 semctl sys_semctl | 75 | 66 common semctl sys_semctl |
76 | 67 64 shmdt sys_shmdt | 76 | 67 common shmdt sys_shmdt |
77 | 68 64 msgget sys_msgget | 77 | 68 common msgget sys_msgget |
78 | 69 64 msgsnd sys_msgsnd | 78 | 69 common msgsnd sys_msgsnd |
79 | 70 64 msgrcv sys_msgrcv | 79 | 70 common msgrcv sys_msgrcv |
80 | 71 64 msgctl sys_msgctl | 80 | 71 common msgctl sys_msgctl |
81 | 72 64 fcntl sys_fcntl | 81 | 72 common fcntl sys_fcntl |
82 | 73 64 flock sys_flock | 82 | 73 common flock sys_flock |
83 | 74 64 fsync sys_fsync | 83 | 74 common fsync sys_fsync |
84 | 75 64 fdatasync sys_fdatasync | 84 | 75 common fdatasync sys_fdatasync |
85 | 76 64 truncate sys_truncate | 85 | 76 common truncate sys_truncate |
86 | 77 64 ftruncate sys_ftruncate | 86 | 77 common ftruncate sys_ftruncate |
87 | 78 64 getdents sys_getdents | 87 | 78 common getdents sys_getdents |
88 | 79 64 getcwd sys_getcwd | 88 | 79 common getcwd sys_getcwd |
89 | 80 64 chdir sys_chdir | 89 | 80 common chdir sys_chdir |
90 | 81 64 fchdir sys_fchdir | 90 | 81 common fchdir sys_fchdir |
91 | 82 64 rename sys_rename | 91 | 82 common rename sys_rename |
92 | 83 64 mkdir sys_mkdir | 92 | 83 common mkdir sys_mkdir |
93 | 84 64 rmdir sys_rmdir | 93 | 84 common rmdir sys_rmdir |
94 | 85 64 creat sys_creat | 94 | 85 common creat sys_creat |
95 | 86 64 link sys_link | 95 | 86 common link sys_link |
96 | 87 64 unlink sys_unlink | 96 | 87 common unlink sys_unlink |
97 | 88 64 symlink sys_symlink | 97 | 88 common symlink sys_symlink |
98 | 89 64 readlink sys_readlink | 98 | 89 common readlink sys_readlink |
99 | 90 64 chmod sys_chmod | 99 | 90 common chmod sys_chmod |
100 | 91 64 fchmod sys_fchmod | 100 | 91 common fchmod sys_fchmod |
101 | 92 64 chown sys_chown | 101 | 92 common chown sys_chown |
102 | 93 64 fchown sys_fchown | 102 | 93 common fchown sys_fchown |
103 | 94 64 lchown sys_lchown | 103 | 94 common lchown sys_lchown |
104 | 95 64 umask sys_umask | 104 | 95 common umask sys_umask |
105 | 96 64 gettimeofday sys_gettimeofday | 105 | 96 common gettimeofday sys_gettimeofday |
106 | 97 64 getrlimit sys_getrlimit | 106 | 97 common getrlimit sys_getrlimit |
107 | 98 64 getrusage sys_getrusage | 107 | 98 common getrusage sys_getrusage |
108 | 99 64 sysinfo sys_sysinfo | 108 | 99 common sysinfo sys_sysinfo |
109 | 100 64 times sys_times | 109 | 100 common times sys_times |
110 | 101 64 ptrace sys_ptrace | 110 | 101 64 ptrace sys_ptrace |
111 | 102 64 getuid sys_getuid | 111 | 102 common getuid sys_getuid |
112 | 103 64 syslog sys_syslog | 112 | 103 common syslog sys_syslog |
113 | 104 64 getgid sys_getgid | 113 | 104 common getgid sys_getgid |
114 | 105 64 setuid sys_setuid | 114 | 105 common setuid sys_setuid |
115 | 106 64 setgid sys_setgid | 115 | 106 common setgid sys_setgid |
116 | 107 64 geteuid sys_geteuid | 116 | 107 common geteuid sys_geteuid |
117 | 108 64 getegid sys_getegid | 117 | 108 common getegid sys_getegid |
118 | 109 64 setpgid sys_setpgid | 118 | 109 common setpgid sys_setpgid |
119 | 110 64 getppid sys_getppid | 119 | 110 common getppid sys_getppid |
120 | 111 64 getpgrp sys_getpgrp | 120 | 111 common getpgrp sys_getpgrp |
121 | 112 64 setsid sys_setsid | 121 | 112 common setsid sys_setsid |
122 | 113 64 setreuid sys_setreuid | 122 | 113 common setreuid sys_setreuid |
123 | 114 64 setregid sys_setregid | 123 | 114 common setregid sys_setregid |
124 | 115 64 getgroups sys_getgroups | 124 | 115 common getgroups sys_getgroups |
125 | 116 64 setgroups sys_setgroups | 125 | 116 common setgroups sys_setgroups |
126 | 117 64 setresuid sys_setresuid | 126 | 117 common setresuid sys_setresuid |
127 | 118 64 getresuid sys_getresuid | 127 | 118 common getresuid sys_getresuid |
128 | 119 64 setresgid sys_setresgid | 128 | 119 common setresgid sys_setresgid |
129 | 120 64 getresgid sys_getresgid | 129 | 120 common getresgid sys_getresgid |
130 | 121 64 getpgid sys_getpgid | 130 | 121 common getpgid sys_getpgid |
131 | 122 64 setfsuid sys_setfsuid | 131 | 122 common setfsuid sys_setfsuid |
132 | 123 64 setfsgid sys_setfsgid | 132 | 123 common setfsgid sys_setfsgid |
133 | 124 64 getsid sys_getsid | 133 | 124 common getsid sys_getsid |
134 | 125 64 capget sys_capget | 134 | 125 common capget sys_capget |
135 | 126 64 capset sys_capset | 135 | 126 common capset sys_capset |
136 | 127 64 rt_sigpending sys_rt_sigpending | 136 | 127 64 rt_sigpending sys_rt_sigpending |
137 | 128 64 rt_sigtimedwait sys_rt_sigtimedwait | 137 | 128 64 rt_sigtimedwait sys_rt_sigtimedwait |
138 | 129 64 rt_sigqueueinfo sys_rt_sigqueueinfo | 138 | 129 64 rt_sigqueueinfo sys_rt_sigqueueinfo |
139 | 130 64 rt_sigsuspend sys_rt_sigsuspend | 139 | 130 common rt_sigsuspend sys_rt_sigsuspend |
140 | 131 64 sigaltstack stub_sigaltstack | 140 | 131 64 sigaltstack stub_sigaltstack |
141 | 132 64 utime sys_utime | 141 | 132 common utime sys_utime |
142 | 133 64 mknod sys_mknod | 142 | 133 common mknod sys_mknod |
143 | 134 64 uselib | 143 | 134 64 uselib |
144 | 135 64 personality sys_personality | 144 | 135 common personality sys_personality |
145 | 136 64 ustat sys_ustat | 145 | 136 common ustat sys_ustat |
146 | 137 64 statfs sys_statfs | 146 | 137 common statfs sys_statfs |
147 | 138 64 fstatfs sys_fstatfs | 147 | 138 common fstatfs sys_fstatfs |
148 | 139 64 sysfs sys_sysfs | 148 | 139 common sysfs sys_sysfs |
149 | 140 64 getpriority sys_getpriority | 149 | 140 common getpriority sys_getpriority |
150 | 141 64 setpriority sys_setpriority | 150 | 141 common setpriority sys_setpriority |
151 | 142 64 sched_setparam sys_sched_setparam | 151 | 142 common sched_setparam sys_sched_setparam |
152 | 143 64 sched_getparam sys_sched_getparam | 152 | 143 common sched_getparam sys_sched_getparam |
153 | 144 64 sched_setscheduler sys_sched_setscheduler | 153 | 144 common sched_setscheduler sys_sched_setscheduler |
154 | 145 64 sched_getscheduler sys_sched_getscheduler | 154 | 145 common sched_getscheduler sys_sched_getscheduler |
155 | 146 64 sched_get_priority_max sys_sched_get_priority_max | 155 | 146 common sched_get_priority_max sys_sched_get_priority_max |
156 | 147 64 sched_get_priority_min sys_sched_get_priority_min | 156 | 147 common sched_get_priority_min sys_sched_get_priority_min |
157 | 148 64 sched_rr_get_interval sys_sched_rr_get_interval | 157 | 148 common sched_rr_get_interval sys_sched_rr_get_interval |
158 | 149 64 mlock sys_mlock | 158 | 149 common mlock sys_mlock |
159 | 150 64 munlock sys_munlock | 159 | 150 common munlock sys_munlock |
160 | 151 64 mlockall sys_mlockall | 160 | 151 common mlockall sys_mlockall |
161 | 152 64 munlockall sys_munlockall | 161 | 152 common munlockall sys_munlockall |
162 | 153 64 vhangup sys_vhangup | 162 | 153 common vhangup sys_vhangup |
163 | 154 64 modify_ldt sys_modify_ldt | 163 | 154 common modify_ldt sys_modify_ldt |
164 | 155 64 pivot_root sys_pivot_root | 164 | 155 common pivot_root sys_pivot_root |
165 | 156 64 _sysctl sys_sysctl | 165 | 156 64 _sysctl sys_sysctl |
166 | 157 64 prctl sys_prctl | 166 | 157 common prctl sys_prctl |
167 | 158 64 arch_prctl sys_arch_prctl | 167 | 158 common arch_prctl sys_arch_prctl |
168 | 159 64 adjtimex sys_adjtimex | 168 | 159 common adjtimex sys_adjtimex |
169 | 160 64 setrlimit sys_setrlimit | 169 | 160 common setrlimit sys_setrlimit |
170 | 161 64 chroot sys_chroot | 170 | 161 common chroot sys_chroot |
171 | 162 64 sync sys_sync | 171 | 162 common sync sys_sync |
172 | 163 64 acct sys_acct | 172 | 163 common acct sys_acct |
173 | 164 64 settimeofday sys_settimeofday | 173 | 164 common settimeofday sys_settimeofday |
174 | 165 64 mount sys_mount | 174 | 165 common mount sys_mount |
175 | 166 64 umount2 sys_umount | 175 | 166 common umount2 sys_umount |
176 | 167 64 swapon sys_swapon | 176 | 167 common swapon sys_swapon |
177 | 168 64 swapoff sys_swapoff | 177 | 168 common swapoff sys_swapoff |
178 | 169 64 reboot sys_reboot | 178 | 169 common reboot sys_reboot |
179 | 170 64 sethostname sys_sethostname | 179 | 170 common sethostname sys_sethostname |
180 | 171 64 setdomainname sys_setdomainname | 180 | 171 common setdomainname sys_setdomainname |
181 | 172 64 iopl stub_iopl | 181 | 172 common iopl stub_iopl |
182 | 173 64 ioperm sys_ioperm | 182 | 173 common ioperm sys_ioperm |
183 | 174 64 create_module | 183 | 174 64 create_module |
184 | 175 64 init_module sys_init_module | 184 | 175 common init_module sys_init_module |
185 | 176 64 delete_module sys_delete_module | 185 | 176 common delete_module sys_delete_module |
186 | 177 64 get_kernel_syms | 186 | 177 64 get_kernel_syms |
187 | 178 64 query_module | 187 | 178 64 query_module |
188 | 179 64 quotactl sys_quotactl | 188 | 179 common quotactl sys_quotactl |
189 | 180 64 nfsservctl | 189 | 180 64 nfsservctl |
190 | 181 64 getpmsg | 190 | 181 common getpmsg |
191 | 182 64 putpmsg | 191 | 182 common putpmsg |
192 | 183 64 afs_syscall | 192 | 183 common afs_syscall |
193 | 184 64 tuxcall | 193 | 184 common tuxcall |
194 | 185 64 security | 194 | 185 common security |
195 | 186 64 gettid sys_gettid | 195 | 186 common gettid sys_gettid |
196 | 187 64 readahead sys_readahead | 196 | 187 common readahead sys_readahead |
197 | 188 64 setxattr sys_setxattr | 197 | 188 common setxattr sys_setxattr |
198 | 189 64 lsetxattr sys_lsetxattr | 198 | 189 common lsetxattr sys_lsetxattr |
199 | 190 64 fsetxattr sys_fsetxattr | 199 | 190 common fsetxattr sys_fsetxattr |
200 | 191 64 getxattr sys_getxattr | 200 | 191 common getxattr sys_getxattr |
201 | 192 64 lgetxattr sys_lgetxattr | 201 | 192 common lgetxattr sys_lgetxattr |
202 | 193 64 fgetxattr sys_fgetxattr | 202 | 193 common fgetxattr sys_fgetxattr |
203 | 194 64 listxattr sys_listxattr | 203 | 194 common listxattr sys_listxattr |
204 | 195 64 llistxattr sys_llistxattr | 204 | 195 common llistxattr sys_llistxattr |
205 | 196 64 flistxattr sys_flistxattr | 205 | 196 common flistxattr sys_flistxattr |
206 | 197 64 removexattr sys_removexattr | 206 | 197 common removexattr sys_removexattr |
207 | 198 64 lremovexattr sys_lremovexattr | 207 | 198 common lremovexattr sys_lremovexattr |
208 | 199 64 fremovexattr sys_fremovexattr | 208 | 199 common fremovexattr sys_fremovexattr |
209 | 200 64 tkill sys_tkill | 209 | 200 common tkill sys_tkill |
210 | 201 64 time sys_time | 210 | 201 common time sys_time |
211 | 202 64 futex sys_futex | 211 | 202 common futex sys_futex |
212 | 203 64 sched_setaffinity sys_sched_setaffinity | 212 | 203 common sched_setaffinity sys_sched_setaffinity |
213 | 204 64 sched_getaffinity sys_sched_getaffinity | 213 | 204 common sched_getaffinity sys_sched_getaffinity |
214 | 205 64 set_thread_area | 214 | 205 64 set_thread_area |
215 | 206 64 io_setup sys_io_setup | 215 | 206 common io_setup sys_io_setup |
216 | 207 64 io_destroy sys_io_destroy | 216 | 207 common io_destroy sys_io_destroy |
217 | 208 64 io_getevents sys_io_getevents | 217 | 208 common io_getevents sys_io_getevents |
218 | 209 64 io_submit sys_io_submit | 218 | 209 common io_submit sys_io_submit |
219 | 210 64 io_cancel sys_io_cancel | 219 | 210 common io_cancel sys_io_cancel |
220 | 211 64 get_thread_area | 220 | 211 64 get_thread_area |
221 | 212 64 lookup_dcookie sys_lookup_dcookie | 221 | 212 common lookup_dcookie sys_lookup_dcookie |
222 | 213 64 epoll_create sys_epoll_create | 222 | 213 common epoll_create sys_epoll_create |
223 | 214 64 epoll_ctl_old | 223 | 214 64 epoll_ctl_old |
224 | 215 64 epoll_wait_old | 224 | 215 64 epoll_wait_old |
225 | 216 64 remap_file_pages sys_remap_file_pages | 225 | 216 common remap_file_pages sys_remap_file_pages |
226 | 217 64 getdents64 sys_getdents64 | 226 | 217 common getdents64 sys_getdents64 |
227 | 218 64 set_tid_address sys_set_tid_address | 227 | 218 common set_tid_address sys_set_tid_address |
228 | 219 64 restart_syscall sys_restart_syscall | 228 | 219 common restart_syscall sys_restart_syscall |
229 | 220 64 semtimedop sys_semtimedop | 229 | 220 common semtimedop sys_semtimedop |
230 | 221 64 fadvise64 sys_fadvise64 | 230 | 221 common fadvise64 sys_fadvise64 |
231 | 222 64 timer_create sys_timer_create | 231 | 222 64 timer_create sys_timer_create |
232 | 223 64 timer_settime sys_timer_settime | 232 | 223 common timer_settime sys_timer_settime |
233 | 224 64 timer_gettime sys_timer_gettime | 233 | 224 common timer_gettime sys_timer_gettime |
234 | 225 64 timer_getoverrun sys_timer_getoverrun | 234 | 225 common timer_getoverrun sys_timer_getoverrun |
235 | 226 64 timer_delete sys_timer_delete | 235 | 226 common timer_delete sys_timer_delete |
236 | 227 64 clock_settime sys_clock_settime | 236 | 227 common clock_settime sys_clock_settime |
237 | 228 64 clock_gettime sys_clock_gettime | 237 | 228 common clock_gettime sys_clock_gettime |
238 | 229 64 clock_getres sys_clock_getres | 238 | 229 common clock_getres sys_clock_getres |
239 | 230 64 clock_nanosleep sys_clock_nanosleep | 239 | 230 common clock_nanosleep sys_clock_nanosleep |
240 | 231 64 exit_group sys_exit_group | 240 | 231 common exit_group sys_exit_group |
241 | 232 64 epoll_wait sys_epoll_wait | 241 | 232 common epoll_wait sys_epoll_wait |
242 | 233 64 epoll_ctl sys_epoll_ctl | 242 | 233 common epoll_ctl sys_epoll_ctl |
243 | 234 64 tgkill sys_tgkill | 243 | 234 common tgkill sys_tgkill |
244 | 235 64 utimes sys_utimes | 244 | 235 common utimes sys_utimes |
245 | 236 64 vserver | 245 | 236 64 vserver |
246 | 237 64 mbind sys_mbind | 246 | 237 common mbind sys_mbind |
247 | 238 64 set_mempolicy sys_set_mempolicy | 247 | 238 common set_mempolicy sys_set_mempolicy |
248 | 239 64 get_mempolicy sys_get_mempolicy | 248 | 239 common get_mempolicy sys_get_mempolicy |
249 | 240 64 mq_open sys_mq_open | 249 | 240 common mq_open sys_mq_open |
250 | 241 64 mq_unlink sys_mq_unlink | 250 | 241 common mq_unlink sys_mq_unlink |
251 | 242 64 mq_timedsend sys_mq_timedsend | 251 | 242 common mq_timedsend sys_mq_timedsend |
252 | 243 64 mq_timedreceive sys_mq_timedreceive | 252 | 243 common mq_timedreceive sys_mq_timedreceive |
253 | 244 64 mq_notify sys_mq_notify | 253 | 244 64 mq_notify sys_mq_notify |
254 | 245 64 mq_getsetattr sys_mq_getsetattr | 254 | 245 common mq_getsetattr sys_mq_getsetattr |
255 | 246 64 kexec_load sys_kexec_load | 255 | 246 64 kexec_load sys_kexec_load |
256 | 247 64 waitid sys_waitid | 256 | 247 64 waitid sys_waitid |
257 | 248 64 add_key sys_add_key | 257 | 248 common add_key sys_add_key |
258 | 249 64 request_key sys_request_key | 258 | 249 common request_key sys_request_key |
259 | 250 64 keyctl sys_keyctl | 259 | 250 common keyctl sys_keyctl |
260 | 251 64 ioprio_set sys_ioprio_set | 260 | 251 common ioprio_set sys_ioprio_set |
261 | 252 64 ioprio_get sys_ioprio_get | 261 | 252 common ioprio_get sys_ioprio_get |
262 | 253 64 inotify_init sys_inotify_init | 262 | 253 common inotify_init sys_inotify_init |
263 | 254 64 inotify_add_watch sys_inotify_add_watch | 263 | 254 common inotify_add_watch sys_inotify_add_watch |
264 | 255 64 inotify_rm_watch sys_inotify_rm_watch | 264 | 255 common inotify_rm_watch sys_inotify_rm_watch |
265 | 256 64 migrate_pages sys_migrate_pages | 265 | 256 common migrate_pages sys_migrate_pages |
266 | 257 64 openat sys_openat | 266 | 257 common openat sys_openat |
267 | 258 64 mkdirat sys_mkdirat | 267 | 258 common mkdirat sys_mkdirat |
268 | 259 64 mknodat sys_mknodat | 268 | 259 common mknodat sys_mknodat |
269 | 260 64 fchownat sys_fchownat | 269 | 260 common fchownat sys_fchownat |
270 | 261 64 futimesat sys_futimesat | 270 | 261 common futimesat sys_futimesat |
271 | 262 64 newfstatat sys_newfstatat | 271 | 262 common newfstatat sys_newfstatat |
272 | 263 64 unlinkat sys_unlinkat | 272 | 263 common unlinkat sys_unlinkat |
273 | 264 64 renameat sys_renameat | 273 | 264 common renameat sys_renameat |
274 | 265 64 linkat sys_linkat | 274 | 265 common linkat sys_linkat |
275 | 266 64 symlinkat sys_symlinkat | 275 | 266 common symlinkat sys_symlinkat |
276 | 267 64 readlinkat sys_readlinkat | 276 | 267 common readlinkat sys_readlinkat |
277 | 268 64 fchmodat sys_fchmodat | 277 | 268 common fchmodat sys_fchmodat |
278 | 269 64 faccessat sys_faccessat | 278 | 269 common faccessat sys_faccessat |
279 | 270 64 pselect6 sys_pselect6 | 279 | 270 common pselect6 sys_pselect6 |
280 | 271 64 ppoll sys_ppoll | 280 | 271 common ppoll sys_ppoll |
281 | 272 64 unshare sys_unshare | 281 | 272 common unshare sys_unshare |
282 | 273 64 set_robust_list sys_set_robust_list | 282 | 273 64 set_robust_list sys_set_robust_list |
283 | 274 64 get_robust_list sys_get_robust_list | 283 | 274 64 get_robust_list sys_get_robust_list |
284 | 275 64 splice sys_splice | 284 | 275 common splice sys_splice |
285 | 276 64 tee sys_tee | 285 | 276 common tee sys_tee |
286 | 277 64 sync_file_range sys_sync_file_range | 286 | 277 common sync_file_range sys_sync_file_range |
287 | 278 64 vmsplice sys_vmsplice | 287 | 278 64 vmsplice sys_vmsplice |
288 | 279 64 move_pages sys_move_pages | 288 | 279 64 move_pages sys_move_pages |
289 | 280 64 utimensat sys_utimensat | 289 | 280 common utimensat sys_utimensat |
290 | 281 64 epoll_pwait sys_epoll_pwait | 290 | 281 common epoll_pwait sys_epoll_pwait |
291 | 282 64 signalfd sys_signalfd | 291 | 282 common signalfd sys_signalfd |
292 | 283 64 timerfd_create sys_timerfd_create | 292 | 283 common timerfd_create sys_timerfd_create |
293 | 284 64 eventfd sys_eventfd | 293 | 284 common eventfd sys_eventfd |
294 | 285 64 fallocate sys_fallocate | 294 | 285 common fallocate sys_fallocate |
295 | 286 64 timerfd_settime sys_timerfd_settime | 295 | 286 common timerfd_settime sys_timerfd_settime |
296 | 287 64 timerfd_gettime sys_timerfd_gettime | 296 | 287 common timerfd_gettime sys_timerfd_gettime |
297 | 288 64 accept4 sys_accept4 | 297 | 288 common accept4 sys_accept4 |
298 | 289 64 signalfd4 sys_signalfd4 | 298 | 289 common signalfd4 sys_signalfd4 |
299 | 290 64 eventfd2 sys_eventfd2 | 299 | 290 common eventfd2 sys_eventfd2 |
300 | 291 64 epoll_create1 sys_epoll_create1 | 300 | 291 common epoll_create1 sys_epoll_create1 |
301 | 292 64 dup3 sys_dup3 | 301 | 292 common dup3 sys_dup3 |
302 | 293 64 pipe2 sys_pipe2 | 302 | 293 common pipe2 sys_pipe2 |
303 | 294 64 inotify_init1 sys_inotify_init1 | 303 | 294 common inotify_init1 sys_inotify_init1 |
304 | 295 64 preadv sys_preadv | 304 | 295 64 preadv sys_preadv |
305 | 296 64 pwritev sys_pwritev | 305 | 296 64 pwritev sys_pwritev |
306 | 297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo | 306 | 297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo |
307 | 298 64 perf_event_open sys_perf_event_open | 307 | 298 common perf_event_open sys_perf_event_open |
308 | 299 64 recvmmsg sys_recvmmsg | 308 | 299 64 recvmmsg sys_recvmmsg |
309 | 300 64 fanotify_init sys_fanotify_init | 309 | 300 common fanotify_init sys_fanotify_init |
310 | 301 64 fanotify_mark sys_fanotify_mark | 310 | 301 common fanotify_mark sys_fanotify_mark |
311 | 302 64 prlimit64 sys_prlimit64 | 311 | 302 common prlimit64 sys_prlimit64 |
312 | 303 64 name_to_handle_at sys_name_to_handle_at | 312 | 303 common name_to_handle_at sys_name_to_handle_at |
313 | 304 64 open_by_handle_at sys_open_by_handle_at | 313 | 304 common open_by_handle_at sys_open_by_handle_at |
314 | 305 64 clock_adjtime sys_clock_adjtime | 314 | 305 common clock_adjtime sys_clock_adjtime |
315 | 306 64 syncfs sys_syncfs | 315 | 306 common syncfs sys_syncfs |
316 | 307 64 sendmmsg sys_sendmmsg | 316 | 307 64 sendmmsg sys_sendmmsg |
317 | 308 64 setns sys_setns | 317 | 308 common setns sys_setns |
318 | 309 64 getcpu sys_getcpu | 318 | 309 common getcpu sys_getcpu |
319 | 310 64 process_vm_readv sys_process_vm_readv | 319 | 310 64 process_vm_readv sys_process_vm_readv |
320 | 311 64 process_vm_writev sys_process_vm_writev | 320 | 311 64 process_vm_writev sys_process_vm_writev |
321 | # | ||
322 | # x32-specific system call numbers start at 512 to avoid cache impact | ||
323 | # for native 64-bit operation. | ||
324 | # | ||
325 | 512 x32 rt_sigaction sys32_rt_sigaction | ||
326 | 513 x32 rt_sigreturn stub_x32_rt_sigreturn | ||
327 | 514 x32 ioctl compat_sys_ioctl | ||
328 | 515 x32 readv compat_sys_readv | ||
329 | 516 x32 writev compat_sys_writev | ||
330 | 517 x32 recvfrom compat_sys_recvfrom | ||
331 | 518 x32 sendmsg compat_sys_sendmsg | ||
332 | 519 x32 recvmsg compat_sys_recvmsg | ||
333 | 520 x32 execve stub_x32_execve | ||
334 | 521 x32 ptrace compat_sys_ptrace | ||
335 | 522 x32 rt_sigpending sys32_rt_sigpending | ||
336 | 523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait | ||
337 | 524 x32 rt_sigqueueinfo sys32_rt_sigqueueinfo | ||
338 | 525 x32 sigaltstack stub_x32_sigaltstack | ||
339 | 526 x32 timer_create compat_sys_timer_create | ||
340 | 527 x32 mq_notify compat_sys_mq_notify | ||
341 | 528 x32 kexec_load compat_sys_kexec_load | ||
342 | 529 x32 waitid compat_sys_waitid | ||
343 | 530 x32 set_robust_list compat_sys_set_robust_list | ||
344 | 531 x32 get_robust_list compat_sys_get_robust_list | ||
345 | 532 x32 vmsplice compat_sys_vmsplice | ||
346 | 533 x32 move_pages compat_sys_move_pages | ||
347 | 534 x32 preadv compat_sys_preadv64 | ||
348 | 535 x32 pwritev compat_sys_pwritev64 | ||
349 | 536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo | ||
350 | 537 x32 recvmmsg compat_sys_recvmmsg | ||
351 | 538 x32 sendmmsg compat_sys_sendmmsg | ||
352 | 539 x32 process_vm_readv compat_sys_process_vm_readv | ||
353 | 540 x32 process_vm_writev compat_sys_process_vm_writev | ||
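[Editor's note] The split above is the heart of this table: rows marked "common" are shared verbatim between the 64-bit and x32 ABIs, while rows that pass pointer-width-sensitive structures stay "64" and get a compat_sys_* replacement in the 512+ range. A minimal userspace sketch of how these numbers are consumed, assuming the usual x32 convention that the caller ORs the table slot with __X32_SYSCALL_BIT (0x40000000) and that the running kernel was built with CONFIG_X86_X32 (otherwise the call returns -ENOSYS):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __X32_SYSCALL_BIT
#define __X32_SYSCALL_BIT 0x40000000	/* assumed x32 marker bit */
#endif

int main(void)
{
	/* "common" rows keep their 64-bit slot: getpid is nr 39. */
	long pid = syscall(__X32_SYSCALL_BIT | 39);

	/* x32-only rows live at 512+: slot 514 is ioctl above, so an
	 * x32 process issues ioctl as 0x40000202. */
	printf("x32 getpid -> %ld, x32 ioctl nr -> %#x\n",
	       pid, __X32_SYSCALL_BIT | 514);
	return 0;
}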
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c index fe626c3ba01b..9924776f4265 100644 --- a/arch/x86/um/sys_call_table_64.c +++ b/arch/x86/um/sys_call_table_64.c | |||
@@ -35,6 +35,9 @@ | |||
35 | #define stub_sigaltstack sys_sigaltstack | 35 | #define stub_sigaltstack sys_sigaltstack |
36 | #define stub_rt_sigreturn sys_rt_sigreturn | 36 | #define stub_rt_sigreturn sys_rt_sigreturn |
37 | 37 | ||
38 | #define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat) | ||
39 | #define __SYSCALL_X32(nr, sym, compat) /* Not supported */ | ||
40 | |||
38 | #define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; | 41 | #define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; |
39 | #include <asm/syscalls_64.h> | 42 | #include <asm/syscalls_64.h> |
40 | 43 | ||
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c index 5edf4f4bbf53..ce7e3607a870 100644 --- a/arch/x86/um/user-offsets.c +++ b/arch/x86/um/user-offsets.c | |||
@@ -15,6 +15,8 @@ static char syscalls[] = { | |||
15 | }; | 15 | }; |
16 | #else | 16 | #else |
17 | #define __SYSCALL_64(nr, sym, compat) [nr] = 1, | 17 | #define __SYSCALL_64(nr, sym, compat) [nr] = 1, |
18 | #define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1, | ||
19 | #define __SYSCALL_X32(nr, sym, compat) /* Not supported */ | ||
18 | static char syscalls[] = { | 20 | static char syscalls[] = { |
19 | #include <asm/syscalls_64.h> | 21 | #include <asm/syscalls_64.h> |
20 | }; | 22 | }; |
diff --git a/arch/x86/vdso/.gitignore b/arch/x86/vdso/.gitignore index 60274d5746e1..3282874bc61d 100644 --- a/arch/x86/vdso/.gitignore +++ b/arch/x86/vdso/.gitignore | |||
@@ -1,5 +1,7 @@ | |||
1 | vdso.lds | 1 | vdso.lds |
2 | vdso-syms.lds | 2 | vdso-syms.lds |
3 | vdsox32.lds | ||
4 | vdsox32-syms.lds | ||
3 | vdso32-syms.lds | 5 | vdso32-syms.lds |
4 | vdso32-syscall-syms.lds | 6 | vdso32-syscall-syms.lds |
5 | vdso32-sysenter-syms.lds | 7 | vdso32-sysenter-syms.lds |
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 5d179502a52c..fd14be1d1472 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile | |||
@@ -3,21 +3,29 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | VDSO64-$(CONFIG_X86_64) := y | 5 | VDSO64-$(CONFIG_X86_64) := y |
6 | VDSOX32-$(CONFIG_X86_X32_ABI) := y | ||
6 | VDSO32-$(CONFIG_X86_32) := y | 7 | VDSO32-$(CONFIG_X86_32) := y |
7 | VDSO32-$(CONFIG_COMPAT) := y | 8 | VDSO32-$(CONFIG_COMPAT) := y |
8 | 9 | ||
9 | vdso-install-$(VDSO64-y) += vdso.so | 10 | vdso-install-$(VDSO64-y) += vdso.so |
11 | vdso-install-$(VDSOX32-y) += vdsox32.so | ||
10 | vdso-install-$(VDSO32-y) += $(vdso32-images) | 12 | vdso-install-$(VDSO32-y) += $(vdso32-images) |
11 | 13 | ||
12 | 14 | ||
13 | # files to link into the vdso | 15 | # files to link into the vdso |
14 | vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o | 16 | vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o |
15 | 17 | ||
18 | vobjs-$(VDSOX32-y) += $(vobjx32s-compat) | ||
19 | |||
20 | # Filter out x32 objects. | ||
21 | vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y)) | ||
22 | |||
16 | # files to link into kernel | 23 | # files to link into kernel |
17 | obj-$(VDSO64-y) += vma.o vdso.o | 24 | obj-$(VDSO64-y) += vma.o vdso.o |
25 | obj-$(VDSOX32-y) += vdsox32.o | ||
18 | obj-$(VDSO32-y) += vdso32.o vdso32-setup.o | 26 | obj-$(VDSO32-y) += vdso32.o vdso32-setup.o |
19 | 27 | ||
20 | vobjs := $(foreach F,$(vobjs-y),$(obj)/$F) | 28 | vobjs := $(foreach F,$(vobj64s),$(obj)/$F) |
21 | 29 | ||
22 | $(obj)/vdso.o: $(obj)/vdso.so | 30 | $(obj)/vdso.o: $(obj)/vdso.so |
23 | 31 | ||
@@ -73,6 +81,42 @@ $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE | |||
73 | $(call if_changed,vdsosym) | 81 | $(call if_changed,vdsosym) |
74 | 82 | ||
75 | # | 83 | # |
84 | # x32 processes use the x32 vDSO to access 64-bit kernel data. | ||
85 | # | ||
86 | # Build the x32 vDSO image: | ||
87 | # 1. Compile the x32 vDSO as 64-bit. | ||
88 | # 2. Convert the object files to x32. | ||
89 | # 3. Build the x32 vDSO image from the x32 objects; it contains 64-bit | ||
90 | # code so that it can reach the 64-bit address space with 64-bit pointers. | ||
91 | # | ||
92 | |||
93 | targets += vdsox32-syms.lds | ||
94 | obj-$(VDSOX32-y) += vdsox32-syms.lds | ||
95 | |||
96 | CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds) | ||
97 | VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \ | ||
98 | -Wl,-soname=linux-vdso.so.1 \ | ||
99 | -Wl,-z,max-page-size=4096 \ | ||
100 | -Wl,-z,common-page-size=4096 | ||
101 | |||
102 | vobjx32s-y := $(vobj64s:.o=-x32.o) | ||
103 | vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F) | ||
104 | |||
105 | # Convert 64bit object file to x32 for x32 vDSO. | ||
106 | quiet_cmd_x32 = X32 $@ | ||
107 | cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@ | ||
108 | |||
109 | $(obj)/%-x32.o: $(obj)/%.o FORCE | ||
110 | $(call if_changed,x32) | ||
111 | |||
112 | targets += vdsox32.so vdsox32.so.dbg vdsox32.lds $(vobjx32s-y) | ||
113 | |||
114 | $(obj)/vdsox32.o: $(src)/vdsox32.S $(obj)/vdsox32.so | ||
115 | |||
116 | $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE | ||
117 | $(call if_changed,vdso) | ||
118 | |||
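[Editor's note] The three-step recipe in the comment above can be sanity-checked from outside the build: after the $(OBJCOPY) -O elf32-x86-64 step, an object should carry the x32 signature of a 32-bit ELF class paired with the x86-64 machine type. A small checker sketch; the default input file name is purely illustrative:

#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	unsigned char ident[EI_NIDENT];
	Elf32_Half machine;	/* e_machine sits at the same offset in ELF32 and ELF64 */
	FILE *f = fopen(argc > 1 ? argv[1] : "vclock_gettime-x32.o", "rb");

	if (!f || fread(ident, 1, EI_NIDENT, f) != EI_NIDENT)
		return 1;
	fseek(f, EI_NIDENT + sizeof(Elf32_Half), SEEK_SET);	/* skip e_type */
	if (fread(&machine, sizeof(machine), 1, f) != 1)
		return 1;
	fclose(f);

	printf("class=%s machine=%u -> %s\n",
	       ident[EI_CLASS] == ELFCLASS32 ? "ELF32" : "ELF64", machine,
	       (ident[EI_CLASS] == ELFCLASS32 && machine == EM_X86_64) ?
	       "x32 (elf32-x86-64)" : "not an x32 object");
	return 0;
}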
119 | # | ||
76 | # Build multiple 32-bit vDSO images to choose from at boot time. | 120 | # Build multiple 32-bit vDSO images to choose from at boot time. |
77 | # | 121 | # |
78 | obj-$(VDSO32-y) += vdso32-syms.lds | 122 | obj-$(VDSO32-y) += vdso32-syms.lds |
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 468d591dde31..10f9f59477db 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c | |||
@@ -317,6 +317,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
317 | int ret = 0; | 317 | int ret = 0; |
318 | bool compat; | 318 | bool compat; |
319 | 319 | ||
320 | #ifdef CONFIG_X86_X32_ABI | ||
321 | if (test_thread_flag(TIF_X32)) | ||
322 | return x32_setup_additional_pages(bprm, uses_interp); | ||
323 | #endif | ||
324 | |||
320 | if (vdso_enabled == VDSO_DISABLED) | 325 | if (vdso_enabled == VDSO_DISABLED) |
321 | return 0; | 326 | return 0; |
322 | 327 | ||
diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S new file mode 100644 index 000000000000..d6b9a7f42a8a --- /dev/null +++ b/arch/x86/vdso/vdsox32.S | |||
@@ -0,0 +1,22 @@ | |||
1 | #include <asm/page_types.h> | ||
2 | #include <linux/linkage.h> | ||
3 | #include <linux/init.h> | ||
4 | |||
5 | __PAGE_ALIGNED_DATA | ||
6 | |||
7 | .globl vdsox32_start, vdsox32_end | ||
8 | .align PAGE_SIZE | ||
9 | vdsox32_start: | ||
10 | .incbin "arch/x86/vdso/vdsox32.so" | ||
11 | vdsox32_end: | ||
12 | .align PAGE_SIZE /* extra data here leaks to userspace. */ | ||
13 | |||
14 | .previous | ||
15 | |||
16 | .globl vdsox32_pages | ||
17 | .bss | ||
18 | .align 8 | ||
19 | .type vdsox32_pages, @object | ||
20 | vdsox32_pages: | ||
21 | .zero (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE * 8 | ||
22 | .size vdsox32_pages, .-vdsox32_pages | ||
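[Editor's note] The .zero expression above sizes vdsox32_pages at one 8-byte struct page pointer per page of the embedded image, using the same round-up division that init_vdso() repeats at runtime. The arithmetic, spelled out with an illustrative image size:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long image_bytes = 5000;	/* vdsox32_end - vdsox32_start */
	unsigned long npages = (image_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long array_bytes = npages * 8;	/* the .zero expression */

	printf("%lu-byte image -> %lu pages -> %lu-byte pointer array\n",
	       image_bytes, npages, array_bytes);
	return 0;
}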
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/vdso/vdsox32.lds.S new file mode 100644 index 000000000000..62272aa2ae0a --- /dev/null +++ b/arch/x86/vdso/vdsox32.lds.S | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * Linker script for x32 vDSO. | ||
3 | * We #include the file to define the layout details. | ||
4 | * Here we only choose the prelinked virtual address. | ||
5 | * | ||
6 | * This file defines the version script giving the user-exported symbols in | ||
7 | * the DSO. We can define local symbols here called VDSO* to make their | ||
8 | * values visible using the asm-x86/vdso.h macros from the kernel proper. | ||
9 | */ | ||
10 | |||
11 | #define VDSO_PRELINK 0 | ||
12 | #include "vdso-layout.lds.S" | ||
13 | |||
14 | /* | ||
15 | * This controls what userland symbols we export from the vDSO. | ||
16 | */ | ||
17 | VERSION { | ||
18 | LINUX_2.6 { | ||
19 | global: | ||
20 | __vdso_clock_gettime; | ||
21 | __vdso_gettimeofday; | ||
22 | __vdso_getcpu; | ||
23 | __vdso_time; | ||
24 | local: *; | ||
25 | }; | ||
26 | } | ||
27 | |||
28 | VDSOX32_PRELINK = VDSO_PRELINK; | ||
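[Editor's note] The VERSION block is all that userland can see of this image: four __vdso_* entry points, everything else local. At run time a process finds the mapped image through the AT_SYSINFO_EHDR auxv entry rather than through any file on disk. A minimal probe, assuming glibc 2.16+ for getauxval() and a 64-bit caller (an x32 process would read an Elf32_Ehdr here):

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* The kernel publishes the vDSO base address in the aux vector. */
	Elf64_Ehdr *vdso = (Elf64_Ehdr *)getauxval(AT_SYSINFO_EHDR);

	if (!vdso)
		return 1;
	/* Resolving __vdso_clock_gettime etc. would mean walking the
	 * image's own .dynsym/.dynstr tables, omitted here. */
	printf("vDSO mapped at %p with %u program headers\n",
	       (void *)vdso, vdso->e_phnum);
	return 0;
}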
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 153407c35b75..d7dce1dbf8c9 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c | |||
@@ -24,7 +24,44 @@ extern unsigned short vdso_sync_cpuid; | |||
24 | extern struct page *vdso_pages[]; | 24 | extern struct page *vdso_pages[]; |
25 | static unsigned vdso_size; | 25 | static unsigned vdso_size; |
26 | 26 | ||
27 | static void __init patch_vdso(void *vdso, size_t len) | 27 | #ifdef CONFIG_X86_X32_ABI |
28 | extern char vdsox32_start[], vdsox32_end[]; | ||
29 | extern struct page *vdsox32_pages[]; | ||
30 | static unsigned vdsox32_size; | ||
31 | |||
32 | static void __init patch_vdsox32(void *vdso, size_t len) | ||
33 | { | ||
34 | Elf32_Ehdr *hdr = vdso; | ||
35 | Elf32_Shdr *sechdrs, *alt_sec = 0; | ||
36 | char *secstrings; | ||
37 | void *alt_data; | ||
38 | int i; | ||
39 | |||
40 | BUG_ON(len < sizeof(Elf32_Ehdr)); | ||
41 | BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0); | ||
42 | |||
43 | sechdrs = (void *)hdr + hdr->e_shoff; | ||
44 | secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
45 | |||
46 | for (i = 1; i < hdr->e_shnum; i++) { | ||
47 | Elf32_Shdr *shdr = &sechdrs[i]; | ||
48 | if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) { | ||
49 | alt_sec = shdr; | ||
50 | goto found; | ||
51 | } | ||
52 | } | ||
53 | |||
54 | /* If we get here, it's probably a bug. */ | ||
55 | pr_warning("patch_vdsox32: .altinstructions not found\n"); | ||
56 | return; /* nothing to patch */ | ||
57 | |||
58 | found: | ||
59 | alt_data = (void *)hdr + alt_sec->sh_offset; | ||
60 | apply_alternatives(alt_data, alt_data + alt_sec->sh_size); | ||
61 | } | ||
62 | #endif | ||
63 | |||
64 | static void __init patch_vdso64(void *vdso, size_t len) | ||
28 | { | 65 | { |
29 | Elf64_Ehdr *hdr = vdso; | 66 | Elf64_Ehdr *hdr = vdso; |
30 | Elf64_Shdr *sechdrs, *alt_sec = 0; | 67 | Elf64_Shdr *sechdrs, *alt_sec = 0; |
@@ -47,7 +84,7 @@ static void __init patch_vdso(void *vdso, size_t len) | |||
47 | } | 84 | } |
48 | 85 | ||
49 | /* If we get here, it's probably a bug. */ | 86 | /* If we get here, it's probably a bug. */ |
50 | pr_warning("patch_vdso: .altinstructions not found\n"); | 87 | pr_warning("patch_vdso64: .altinstructions not found\n"); |
51 | return; /* nothing to patch */ | 88 | return; /* nothing to patch */ |
52 | 89 | ||
53 | found: | 90 | found: |
@@ -60,12 +97,20 @@ static int __init init_vdso(void) | |||
60 | int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; | 97 | int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; |
61 | int i; | 98 | int i; |
62 | 99 | ||
63 | patch_vdso(vdso_start, vdso_end - vdso_start); | 100 | patch_vdso64(vdso_start, vdso_end - vdso_start); |
64 | 101 | ||
65 | vdso_size = npages << PAGE_SHIFT; | 102 | vdso_size = npages << PAGE_SHIFT; |
66 | for (i = 0; i < npages; i++) | 103 | for (i = 0; i < npages; i++) |
67 | vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE); | 104 | vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE); |
68 | 105 | ||
106 | #ifdef CONFIG_X86_X32_ABI | ||
107 | patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start); | ||
108 | npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE; | ||
109 | vdsox32_size = npages << PAGE_SHIFT; | ||
110 | for (i = 0; i < npages; i++) | ||
111 | vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE); | ||
112 | #endif | ||
113 | |||
69 | return 0; | 114 | return 0; |
70 | } | 115 | } |
71 | subsys_initcall(init_vdso); | 116 | subsys_initcall(init_vdso); |
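[Editor's note] patch_vdsox32() above performs its .altinstructions lookup on the in-kernel copy of the image before any process maps it. The same section walk works on the on-disk build artifact; a standalone sketch that only handles ELF32 input, with the default path being the debug image the Makefile produces:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "vdsox32.so.dbg";
	int fd = open(path, O_RDONLY);
	struct stat st;
	Elf32_Ehdr *hdr;
	Elf32_Shdr *sechdrs;
	char *secstrings;
	int i;

	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;
	hdr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);
	if (hdr == MAP_FAILED || memcmp(hdr->e_ident, ELFMAG, SELFMAG))
		return 1;

	/* Same traversal as patch_vdsox32(), minus apply_alternatives(). */
	sechdrs = (Elf32_Shdr *)((char *)hdr + hdr->e_shoff);
	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (i = 1; i < hdr->e_shnum; i++)
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".altinstructions"))
			printf(".altinstructions: offset %#x, %u bytes\n",
			       sechdrs[i].sh_offset, sechdrs[i].sh_size);
	return 0;
}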
@@ -103,7 +148,10 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) | |||
103 | 148 | ||
104 | /* Setup a VMA at program startup for the vsyscall page. | 149 | /* Setup a VMA at program startup for the vsyscall page. |
105 | Not called for compat tasks */ | 150 | Not called for compat tasks */ |
106 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | 151 | static int setup_additional_pages(struct linux_binprm *bprm, |
152 | int uses_interp, | ||
153 | struct page **pages, | ||
154 | unsigned size) | ||
107 | { | 155 | { |
108 | struct mm_struct *mm = current->mm; | 156 | struct mm_struct *mm = current->mm; |
109 | unsigned long addr; | 157 | unsigned long addr; |
@@ -113,8 +161,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
113 | return 0; | 161 | return 0; |
114 | 162 | ||
115 | down_write(&mm->mmap_sem); | 163 | down_write(&mm->mmap_sem); |
116 | addr = vdso_addr(mm->start_stack, vdso_size); | 164 | addr = vdso_addr(mm->start_stack, size); |
117 | addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0); | 165 | addr = get_unmapped_area(NULL, addr, size, 0, 0); |
118 | if (IS_ERR_VALUE(addr)) { | 166 | if (IS_ERR_VALUE(addr)) { |
119 | ret = addr; | 167 | ret = addr; |
120 | goto up_fail; | 168 | goto up_fail; |
@@ -122,11 +170,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
122 | 170 | ||
123 | current->mm->context.vdso = (void *)addr; | 171 | current->mm->context.vdso = (void *)addr; |
124 | 172 | ||
125 | ret = install_special_mapping(mm, addr, vdso_size, | 173 | ret = install_special_mapping(mm, addr, size, |
126 | VM_READ|VM_EXEC| | 174 | VM_READ|VM_EXEC| |
127 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| | 175 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| |
128 | VM_ALWAYSDUMP, | 176 | VM_ALWAYSDUMP, |
129 | vdso_pages); | 177 | pages); |
130 | if (ret) { | 178 | if (ret) { |
131 | current->mm->context.vdso = NULL; | 179 | current->mm->context.vdso = NULL; |
132 | goto up_fail; | 180 | goto up_fail; |
@@ -137,6 +185,20 @@ up_fail: | |||
137 | return ret; | 185 | return ret; |
138 | } | 186 | } |
139 | 187 | ||
188 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
189 | { | ||
190 | return setup_additional_pages(bprm, uses_interp, vdso_pages, | ||
191 | vdso_size); | ||
192 | } | ||
193 | |||
194 | #ifdef CONFIG_X86_X32_ABI | ||
195 | int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
196 | { | ||
197 | return setup_additional_pages(bprm, uses_interp, vdsox32_pages, | ||
198 | vdsox32_size); | ||
199 | } | ||
200 | #endif | ||
201 | |||
140 | static __init int vdso_setup(char *s) | 202 | static __init int vdso_setup(char *s) |
141 | { | 203 | { |
142 | vdso_enabled = simple_strtoul(s, NULL, 0); | 204 | vdso_enabled = simple_strtoul(s, NULL, 0); |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 12eb07bfb267..4172af8ceeb3 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1141,7 +1141,9 @@ asmlinkage void __init xen_start_kernel(void) | |||
1141 | 1141 | ||
1142 | /* Prevent unwanted bits from being set in PTEs. */ | 1142 | /* Prevent unwanted bits from being set in PTEs. */ |
1143 | __supported_pte_mask &= ~_PAGE_GLOBAL; | 1143 | __supported_pte_mask &= ~_PAGE_GLOBAL; |
1144 | #if 0 | ||
1144 | if (!xen_initial_domain()) | 1145 | if (!xen_initial_domain()) |
1146 | #endif | ||
1145 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); | 1147 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); |
1146 | 1148 | ||
1147 | __supported_pte_mask |= _PAGE_IOMAP; | 1149 | __supported_pte_mask |= _PAGE_IOMAP; |
@@ -1204,10 +1206,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1204 | 1206 | ||
1205 | pgd = (pgd_t *)xen_start_info->pt_base; | 1207 | pgd = (pgd_t *)xen_start_info->pt_base; |
1206 | 1208 | ||
1207 | if (!xen_initial_domain()) | ||
1208 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); | ||
1209 | |||
1210 | __supported_pte_mask |= _PAGE_IOMAP; | ||
1211 | /* Don't do the full vcpu_info placement stuff until we have a | 1209 | /* Don't do the full vcpu_info placement stuff until we have a |
1212 | possible map and a non-dummy shared_info. */ | 1210 | possible map and a non-dummy shared_info. */ |
1213 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; | 1211 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 58a0e46c404d..95c1cf60c669 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -415,13 +415,13 @@ static pteval_t iomap_pte(pteval_t val) | |||
415 | static pteval_t xen_pte_val(pte_t pte) | 415 | static pteval_t xen_pte_val(pte_t pte) |
416 | { | 416 | { |
417 | pteval_t pteval = pte.pte; | 417 | pteval_t pteval = pte.pte; |
418 | 418 | #if 0 | |
419 | /* If this is a WC pte, convert back from Xen WC to Linux WC */ | 419 | /* If this is a WC pte, convert back from Xen WC to Linux WC */ |
420 | if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { | 420 | if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { |
421 | WARN_ON(!pat_enabled); | 421 | WARN_ON(!pat_enabled); |
422 | pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; | 422 | pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; |
423 | } | 423 | } |
424 | 424 | #endif | |
425 | if (xen_initial_domain() && (pteval & _PAGE_IOMAP)) | 425 | if (xen_initial_domain() && (pteval & _PAGE_IOMAP)) |
426 | return pteval; | 426 | return pteval; |
427 | 427 | ||
@@ -463,7 +463,7 @@ void xen_set_pat(u64 pat) | |||
463 | static pte_t xen_make_pte(pteval_t pte) | 463 | static pte_t xen_make_pte(pteval_t pte) |
464 | { | 464 | { |
465 | phys_addr_t addr = (pte & PTE_PFN_MASK); | 465 | phys_addr_t addr = (pte & PTE_PFN_MASK); |
466 | 466 | #if 0 | |
467 | /* If Linux is trying to set a WC pte, then map to the Xen WC. | 467 | /* If Linux is trying to set a WC pte, then map to the Xen WC. |
468 | * If _PAGE_PAT is set, then it probably means it is really | 468 | * If _PAGE_PAT is set, then it probably means it is really |
469 | * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope | 469 | * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope |
@@ -476,7 +476,7 @@ static pte_t xen_make_pte(pteval_t pte) | |||
476 | if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) | 476 | if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) |
477 | pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; | 477 | pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; |
478 | } | 478 | } |
479 | 479 | #endif | |
480 | /* | 480 | /* |
481 | * Unprivileged domains are allowed to do IOMAPpings for | 481 | * Unprivileged domains are allowed to do IOMAPpings for |
482 | * PCI passthrough, but not map ISA space. The ISA | 482 | * PCI passthrough, but not map ISA space. The ISA |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 041d4fe9dfe4..501d4e0244ba 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -409,6 +409,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ | |||
409 | play_dead_common(); | 409 | play_dead_common(); |
410 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); | 410 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); |
411 | cpu_bringup(); | 411 | cpu_bringup(); |
412 | /* | ||
413 | * Balance out the preempt calls - we are running in the cpu_idle | ||
414 | * loop, which was entered at bootup from cpu_bringup_and_idle. | ||
415 | * cpu_bringup_and_idle called cpu_bringup, which did a | ||
416 | * preempt_disable(), so this preempt_enable() balances it out. | ||
417 | */ | ||
418 | preempt_enable(); | ||
412 | } | 419 | } |
413 | 420 | ||
414 | #else /* !CONFIG_HOTPLUG_CPU */ | 421 | #else /* !CONFIG_HOTPLUG_CPU */ |
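[Editor's note] The hunk above fixes a preempt-count leak across vCPU offline/online: cpu_bringup() disables preemption once per bring-up, so a vCPU that plays dead and comes back would otherwise accumulate disables. A schematic stand-in for the counting invariant, not the kernel's real preempt API:

#include <assert.h>
#include <stdio.h>

static int preempt_count;	/* models the per-CPU preempt count */

static void preempt_disable_model(void) { preempt_count++; }
static void preempt_enable_model(void)
{
	assert(preempt_count > 0);	/* enable without disable is a bug */
	preempt_count--;
}

int main(void)
{
	preempt_disable_model();	/* done in cpu_bringup() at bring-up */
	/* ... vCPU plays dead, comes back, re-enters the idle loop ... */
	preempt_enable_model();		/* the new call in xen_play_dead() */

	printf("preempt_count=%d (0 means balanced)\n", preempt_count);
	return 0;
}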