author     Ingo Molnar <mingo@kernel.org>  2012-03-30 02:47:19 -0400
committer  Ingo Molnar <mingo@kernel.org>  2012-03-30 02:50:06 -0400
commit     186e54cbe1145f4d11e32fe10e7e20a11f1b27dd
tree       9b6cf3667a3ea90e0cec0ea7119688ba76c55a71
parent     99dd5497e5be4fe4194cad181d45fd6569a930db
parent     4bde23f8751f388867766b0a62ed1ef8b7e01561
Merge branch 'linus' into x86/urgent
Merge reason: Needed for include file dependencies.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/Kbuild            |    2
-rw-r--r--  arch/x86/include/asm/apic.h            |    1
-rw-r--r--  arch/x86/include/asm/auxvec.h          |    7
-rw-r--r--  arch/x86/include/asm/barrier.h         |  116
-rw-r--r--  arch/x86/include/asm/bug.h             |    4
-rw-r--r--  arch/x86/include/asm/cacheflush.h      |    1
-rw-r--r--  arch/x86/include/asm/compat.h          |   40
-rw-r--r--  arch/x86/include/asm/debugreg.h        |   67
-rw-r--r--  arch/x86/include/asm/elf.h             |   32
-rw-r--r--  arch/x86/include/asm/exec.h            |    1
-rw-r--r--  arch/x86/include/asm/futex.h           |    1
-rw-r--r--  arch/x86/include/asm/i387.h            |    1
-rw-r--r--  arch/x86/include/asm/ia32.h            |   18
-rw-r--r--  arch/x86/include/asm/kgdb.h            |   10
-rw-r--r--  arch/x86/include/asm/kvm.h             |    4
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h     |    3
-rw-r--r--  arch/x86/include/asm/kvm_host.h        |   63
-rw-r--r--  arch/x86/include/asm/local.h           |    1
-rw-r--r--  arch/x86/include/asm/mc146818rtc.h     |    1
-rw-r--r--  arch/x86/include/asm/mtrr.h            |   28
-rw-r--r--  arch/x86/include/asm/page_types.h      |    1
-rw-r--r--  arch/x86/include/asm/paravirt.h        |    1
-rw-r--r--  arch/x86/include/asm/perf_event.h      |    1
-rw-r--r--  arch/x86/include/asm/posix_types.h     |    4
-rw-r--r--  arch/x86/include/asm/posix_types_32.h  |   75
-rw-r--r--  arch/x86/include/asm/posix_types_64.h  |  106
-rw-r--r--  arch/x86/include/asm/posix_types_x32.h |   19
-rw-r--r--  arch/x86/include/asm/processor.h       |  106
-rw-r--r--  arch/x86/include/asm/ptrace.h          |    1
-rw-r--r--  arch/x86/include/asm/segment.h         |   58
-rw-r--r--  arch/x86/include/asm/sigcontext.h      |   57
-rw-r--r--  arch/x86/include/asm/sigframe.h        |   13
-rw-r--r--  arch/x86/include/asm/sighandling.h     |   24
-rw-r--r--  arch/x86/include/asm/special_insns.h   |  199
-rw-r--r--  arch/x86/include/asm/stackprotector.h  |    1
-rw-r--r--  arch/x86/include/asm/switch_to.h       |  129
-rw-r--r--  arch/x86/include/asm/sys_ia32.h        |    7
-rw-r--r--  arch/x86/include/asm/syscall.h         |    5
-rw-r--r--  arch/x86/include/asm/system.h          |  523
-rw-r--r--  arch/x86/include/asm/thread_info.h     |   18
-rw-r--r--  arch/x86/include/asm/tlbflush.h        |    2
-rw-r--r--  arch/x86/include/asm/traps.h           |   25
-rw-r--r--  arch/x86/include/asm/tsc.h             |    4
-rw-r--r--  arch/x86/include/asm/unistd.h          |   15
-rw-r--r--  arch/x86/include/asm/vgtod.h           |   17
-rw-r--r--  arch/x86/include/asm/virtext.h         |    1
-rw-r--r--  arch/x86/include/asm/x86_init.h        |    6
-rw-r--r--  arch/x86/include/asm/xen/interface.h   |    1
48 files changed, 963 insertions, 857 deletions
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index b57e6a43a37a..f9c0d3ba9e84 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -14,6 +14,7 @@ header-y += msr.h
 header-y += mtrr.h
 header-y += posix_types_32.h
 header-y += posix_types_64.h
+header-y += posix_types_x32.h
 header-y += prctl.h
 header-y += processor-flags.h
 header-y += ptrace-abi.h
@@ -24,3 +25,4 @@ header-y += vsyscall.h
 
 genhdr-y += unistd_32.h
 genhdr-y += unistd_64.h
+genhdr-y += unistd_x32.h
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index d3eaac44860a..d85410171260 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -11,7 +11,6 @@
 #include <linux/atomic.h>
 #include <asm/fixmap.h>
 #include <asm/mpspec.h>
-#include <asm/system.h>
 #include <asm/msr.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3	1
diff --git a/arch/x86/include/asm/auxvec.h b/arch/x86/include/asm/auxvec.h
index 1316b4c35425..77203ac352de 100644
--- a/arch/x86/include/asm/auxvec.h
+++ b/arch/x86/include/asm/auxvec.h
@@ -9,4 +9,11 @@
 #endif
 #define AT_SYSINFO_EHDR		33
 
+/* entries in ARCH_DLINFO: */
+#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
+# define AT_VECTOR_SIZE_ARCH 2
+#else /* else it's non-compat x86-64 */
+# define AT_VECTOR_SIZE_ARCH 1
+#endif
+
 #endif /* _ASM_X86_AUXVEC_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
new file mode 100644
index 000000000000..c6cd358a1eec
--- /dev/null
+++ b/arch/x86/include/asm/barrier.h
@@ -0,0 +1,116 @@
+#ifndef _ASM_X86_BARRIER_H
+#define _ASM_X86_BARRIER_H
+
+#include <asm/alternative.h>
+#include <asm/nops.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#ifdef CONFIG_X86_32
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb()	asm volatile("mfence":::"memory")
+#define rmb()	asm volatile("lfence":::"memory")
+#define wmb()	asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb()	rmb()
+#else
+# define smp_rmb()	barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb()	wmb()
+#else
+# define smp_wmb()	barrier()
+#endif
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static __always_inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif /* _ASM_X86_BARRIER_H */
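
For illustration (not part of this patch): the pairing rule described in the
read_barrier_depends() comment above is the same one that governs
smp_wmb()/smp_rmb(). A minimal sketch of the classic message-passing pattern,
assuming kernel context; "payload" and "ready" are hypothetical variables.

	static int payload;
	static int ready;

	static void producer(void)	/* runs on CPU 0 */
	{
		payload = 42;
		smp_wmb();	/* order the payload store before the flag store */
		ready = 1;
	}

	static int consumer(void)	/* runs on CPU 1 */
	{
		while (!ready)
			cpu_relax();
		smp_rmb();	/* order the flag load before the payload load */
		return payload;
	}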
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index f654d1bb17fb..11e1152222d0 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -36,4 +36,8 @@ do { \
 #endif	/* !CONFIG_BUG */
 
 #include <asm-generic/bug.h>
+
+
+extern void show_regs_common(void);
+
 #endif /* _ASM_X86_BUG_H */
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 4e12668711e5..9863ee3747da 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -3,6 +3,7 @@
 
 /* Caches aren't brain-dead on the intel. */
 #include <asm-generic/cacheflush.h>
+#include <asm/special_insns.h>
 
 #ifdef CONFIG_X86_PAT
 /*
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 30d737ef2a42..d6805798d6fc 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,7 +6,9 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <asm/processor.h>
 #include <asm/user32.h>
+#include <asm/unistd.h>
 
 #define COMPAT_USER_HZ		100
 #define COMPAT_UTS_MACHINE	"i686\0\0"
@@ -186,7 +188,20 @@ struct compat_shmid64_ds {
 /*
  * The type of struct elf_prstatus.pr_reg in compatible core dumps.
  */
+#ifdef CONFIG_X86_X32_ABI
+typedef struct user_regs_struct compat_elf_gregset_t;
+
+#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
+#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
+#define SET_PR_FPVALID(S,V) \
+  do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
+  while (0)
+
+#define COMPAT_USE_64BIT_TIME \
+	(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
+#else
 typedef struct user_regs_struct32 compat_elf_gregset_t;
+#endif
 
 /*
  * A pointer passed in from user mode. This should not
192 * A pointer passed in from user mode. This should not 207 * A pointer passed in from user mode. This should not
@@ -208,13 +223,30 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
-	struct pt_regs *regs = task_pt_regs(current);
-	return (void __user *)regs->sp - len;
+	compat_uptr_t sp;
+
+	if (test_thread_flag(TIF_IA32)) {
+		sp = task_pt_regs(current)->sp;
+	} else {
+		/* -128 for the x32 ABI redzone */
+		sp = percpu_read(old_rsp) - 128;
+	}
+
+	return (void __user *)round_down(sp - len, 16);
+}
+
+static inline bool is_x32_task(void)
+{
+#ifdef CONFIG_X86_X32_ABI
+	if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
+		return true;
+#endif
+	return false;
 }
 
-static inline int is_compat_task(void)
+static inline bool is_compat_task(void)
 {
-	return current_thread_info()->status & TS_COMPAT;
+	return is_ia32_task() || is_x32_task();
 }
 
 #endif /* _ASM_X86_COMPAT_H */
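
For illustration (not part of this patch): with this change, is_compat_task()
is true for both classic ia32 tasks and x32 tasks, and is_x32_task() tells
them apart. A minimal sketch, assuming kernel context; the helper is
hypothetical.

	static const char *task_abi_name(void)
	{
		if (!is_compat_task())
			return "64-bit";	/* native task */
		if (is_x32_task())
			return "x32";		/* 64-bit regs, 32-bit longs/pointers */
		return "ia32";			/* classic 32-bit emulation */
	}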
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b903d5ea3941..2d91580bf228 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -78,8 +78,75 @@
  */
 #ifdef __KERNEL__
 
+#include <linux/bug.h>
+
 DECLARE_PER_CPU(unsigned long, cpu_dr7);
 
+#ifndef CONFIG_PARAVIRT
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register)				\
+	(var) = native_get_debugreg(register)
+#define set_debugreg(value, register)				\
+	native_set_debugreg(register, value)
+#endif
+
+static inline unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val = 0;	/* Damn you, gcc! */
+
+	switch (regno) {
+	case 0:
+		asm("mov %%db0, %0" :"=r" (val));
+		break;
+	case 1:
+		asm("mov %%db1, %0" :"=r" (val));
+		break;
+	case 2:
+		asm("mov %%db2, %0" :"=r" (val));
+		break;
+	case 3:
+		asm("mov %%db3, %0" :"=r" (val));
+		break;
+	case 6:
+		asm("mov %%db6, %0" :"=r" (val));
+		break;
+	case 7:
+		asm("mov %%db7, %0" :"=r" (val));
+		break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static inline void native_set_debugreg(int regno, unsigned long value)
+{
+	switch (regno) {
+	case 0:
+		asm("mov %0, %%db0"	::"r" (value));
+		break;
+	case 1:
+		asm("mov %0, %%db1"	::"r" (value));
+		break;
+	case 2:
+		asm("mov %0, %%db2"	::"r" (value));
+		break;
+	case 3:
+		asm("mov %0, %%db3"	::"r" (value));
+		break;
+	case 6:
+		asm("mov %0, %%db6"	::"r" (value));
+		break;
+	case 7:
+		asm("mov %0, %%db7"	::"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
+
 static inline void hw_breakpoint_disable(void)
 {
 	/* Zero the control register for HW Breakpoint */
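
For illustration (not part of this patch): the accessors keep their macro
calling convention after the move — get_debugreg(var, n) reads, and
set_debugreg(value, n) writes. A minimal sketch, assuming kernel context;
the function is hypothetical.

	static void snapshot_and_disable_breakpoints(unsigned long *dr7_out)
	{
		get_debugreg(*dr7_out, 7);	/* save the current DR7 */
		set_debugreg(0UL, 7);		/* turn all hardware breakpoints off */
	}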
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5f962df30d0f..5939f44fe0c0 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -84,7 +84,6 @@ extern unsigned int vdso_enabled;
84 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) 84 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
85 85
86#include <asm/processor.h> 86#include <asm/processor.h>
87#include <asm/system.h>
88 87
89#ifdef CONFIG_X86_32 88#ifdef CONFIG_X86_32
90#include <asm/desc.h> 89#include <asm/desc.h>
@@ -156,7 +155,12 @@ do { \
 #define elf_check_arch(x)			\
 	((x)->e_machine == EM_X86_64)
 
-#define compat_elf_check_arch(x)	elf_check_arch_ia32(x)
+#define compat_elf_check_arch(x)					\
+	(elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
+
+#if __USER32_DS != __USER_DS
+# error "The following code assumes __USER32_DS == __USER_DS"
+#endif
 
 static inline void elf_common_init(struct thread_struct *t,
 				   struct pt_regs *regs, const u16 ds)
@@ -179,8 +183,9 @@ static inline void elf_common_init(struct thread_struct *t,
 void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
 #define compat_start_thread start_thread_ia32
 
-void set_personality_ia32(void);
-#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
+void set_personality_ia32(bool);
+#define COMPAT_SET_PERSONALITY(ex)			\
+	set_personality_ia32((ex).e_machine == EM_X86_64)
 
 #define COMPAT_ELF_PLATFORM			("i686")
 
@@ -287,7 +292,7 @@ do { \
 #define VDSO_HIGH_BASE		0xffffe000U /* CONFIG_COMPAT_VDSO address */
 
 /* 1GB for 64bit, 8MB for 32bit */
-#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
+#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
 
 #define ARCH_DLINFO							\
 do {									\
@@ -296,9 +301,20 @@ do { \
 		    (unsigned long)current->mm->context.vdso);		\
 } while (0)
 
+#define ARCH_DLINFO_X32							\
+do {									\
+	if (vdso_enabled)						\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
+			    (unsigned long)current->mm->context.vdso);	\
+} while (0)
+
 #define AT_SYSINFO		32
 
-#define COMPAT_ARCH_DLINFO	ARCH_DLINFO_IA32(sysctl_vsyscall32)
+#define COMPAT_ARCH_DLINFO						\
+if (test_thread_flag(TIF_X32))						\
+	ARCH_DLINFO_X32;						\
+else									\
+	ARCH_DLINFO_IA32(sysctl_vsyscall32)
 
 #define COMPAT_ELF_ET_DYN_BASE	(TASK_UNMAPPED_BASE + 0x1000000)
 
@@ -314,6 +330,8 @@ struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);
+extern int x32_setup_additional_pages(struct linux_binprm *bprm,
+				      int uses_interp);
 
 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
 #define compat_arch_setup_additional_pages syscall32_setup_pages
@@ -330,7 +348,7 @@ static inline int mmap_is_ia32(void)
 	return 1;
 #endif
 #ifdef CONFIG_IA32_EMULATION
-	if (test_thread_flag(TIF_IA32))
+	if (test_thread_flag(TIF_ADDR32))
 		return 1;
 #endif
 	return 0;
diff --git a/arch/x86/include/asm/exec.h b/arch/x86/include/asm/exec.h
new file mode 100644
index 000000000000..54c2e1db274a
--- /dev/null
+++ b/arch/x86/include/asm/exec.h
@@ -0,0 +1 @@
+/* define arch_align_stack() here */
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index d09bb03653f0..71ecbcba1a4e 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -9,7 +9,6 @@
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/processor.h>
-#include <asm/system.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
 	asm volatile("1:\t" insn "\n"				\
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 7ce0798b1b26..257d9cca214f 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -14,7 +14,6 @@
 
 #include <linux/sched.h>
 #include <linux/hardirq.h>
-#include <asm/system.h>
 
 struct pt_regs;
 struct user_i387_struct;
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 1f7e62517284..ee52760549f0 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -43,6 +43,15 @@ struct ucontext_ia32 {
43 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 43 compat_sigset_t uc_sigmask; /* mask last for extensibility */
44}; 44};
45 45
46struct ucontext_x32 {
47 unsigned int uc_flags;
48 unsigned int uc_link;
49 stack_ia32_t uc_stack;
50 unsigned int uc__pad0; /* needed for alignment */
51 struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */
52 compat_sigset_t uc_sigmask; /* mask last for extensibility */
53};
54
46/* This matches struct stat64 in glibc2.2, hence the absolutely 55/* This matches struct stat64 in glibc2.2, hence the absolutely
47 * insane amounts of padding around dev_t's. 56 * insane amounts of padding around dev_t's.
48 */ 57 */
@@ -116,6 +125,15 @@ typedef struct compat_siginfo {
 			compat_clock_t _stime;
 		} _sigchld;
 
+		/* SIGCHLD (x32 version) */
+		struct {
+			unsigned int _pid;	/* which child */
+			unsigned int _uid;	/* sender's uid */
+			int _status;		/* exit code */
+			compat_s64 _utime;
+			compat_s64 _stime;
+		} _sigchld_x32;
+
 		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
 		struct {
 			unsigned int _addr;	/* faulting insn/memory ref. */
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 77e95f54570a..332f98c9111f 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -64,11 +64,15 @@ enum regnames {
 	GDB_PS,			/* 17 */
 	GDB_CS,			/* 18 */
 	GDB_SS,			/* 19 */
+	GDB_DS,			/* 20 */
+	GDB_ES,			/* 21 */
+	GDB_FS,			/* 22 */
+	GDB_GS,			/* 23 */
 };
 #define GDB_ORIG_AX		57
-#define DBG_MAX_REG_NUM		20
-/* 17 64 bit regs and 3 32 bit regs */
-#define NUMREGBYTES		((17 * 8) + (3 * 4))
+#define DBG_MAX_REG_NUM		24
+/* 17 64 bit regs and 5 32 bit regs */
+#define NUMREGBYTES		((17 * 8) + (5 * 4))
 #endif /* ! CONFIG_X86_32 */
 
 static inline void arch_kgdb_breakpoint(void)
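
For illustration (not part of this patch): the two extra 32-bit segment
registers grow the GDB register buffer from (17 * 8) + (3 * 4) = 148 bytes to
(17 * 8) + (5 * 4) = 156 bytes. A hypothetical compile-time check of that
arithmetic:

	_Static_assert((17 * 8) + (5 * 4) == 156,
		       "x86-64 kgdb register buffer size");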
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 4d8dcbdfc120..e7d1c194d272 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -321,4 +321,8 @@ struct kvm_xcrs {
 	__u64 padding[16];
 };
 
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
 #endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7b9cfc4878af..c222e1a1b12a 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -176,6 +176,7 @@ struct x86_emulate_ops {
 	void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
 	ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
 	int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
+	void (*set_rflags)(struct x86_emulate_ctxt *ctxt, ulong val);
 	int (*cpl)(struct x86_emulate_ctxt *ctxt);
 	int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
 	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
@@ -388,7 +389,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_INTERCEPTED 2
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
-			 u16 tss_selector, int reason,
+			 u16 tss_selector, int idt_index, int reason,
 			 bool has_error_code, u32 error_code);
 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 52d6640a5ca1..e216ba066e79 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -29,7 +29,7 @@
 #include <asm/msr-index.h>
 
 #define KVM_MAX_VCPUS 254
-#define KVM_SOFT_MAX_VCPUS 64
+#define KVM_SOFT_MAX_VCPUS 160
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
@@ -181,13 +181,6 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
-#define NR_PTE_CHAIN_ENTRIES 5
-
-struct kvm_pte_chain {
-	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
-	struct hlist_node link;
-};
-
 /*
  * kvm_mmu_page_role, below, is defined as:
  *
@@ -427,12 +420,16 @@ struct kvm_vcpu_arch {
 
 	u64 last_guest_tsc;
 	u64 last_kernel_ns;
-	u64 last_tsc_nsec;
-	u64 last_tsc_write;
-	u32 virtual_tsc_khz;
+	u64 last_host_tsc;
+	u64 tsc_offset_adjustment;
+	u64 this_tsc_nsec;
+	u64 this_tsc_write;
+	u8  this_tsc_generation;
 	bool tsc_catchup;
-	u32 tsc_catchup_mult;
-	s8 tsc_catchup_shift;
+	bool tsc_always_catchup;
+	s8 virtual_tsc_shift;
+	u32 virtual_tsc_mult;
+	u32 virtual_tsc_khz;
 
 	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
 	unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -478,6 +475,21 @@ struct kvm_vcpu_arch {
 		u32 id;
 		bool send_user_only;
 	} apf;
+
+	/* OSVW MSRs (AMD only) */
+	struct {
+		u64 length;
+		u64 status;
+	} osvw;
+};
+
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
+struct kvm_arch_memory_slot {
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 };
 
 struct kvm_arch {
@@ -511,8 +523,12 @@ struct kvm_arch {
 	s64 kvmclock_offset;
 	raw_spinlock_t tsc_write_lock;
 	u64 last_tsc_nsec;
-	u64 last_tsc_offset;
 	u64 last_tsc_write;
+	u32 last_tsc_khz;
+	u64 cur_tsc_nsec;
+	u64 cur_tsc_write;
+	u64 cur_tsc_offset;
+	u8  cur_tsc_generation;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
@@ -644,7 +660,7 @@ struct kvm_x86_ops {
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
-	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
+	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
 
 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -652,7 +668,7 @@ struct kvm_x86_ops {
 
 	bool (*has_wbinvd_exit)(void);
 
-	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
+	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
@@ -674,6 +690,17 @@ struct kvm_arch_async_pf {
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+					   s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
+}
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
@@ -741,8 +768,8 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
-		    bool has_error_code, u32 error_code);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
+		    int reason, bool has_error_code, u32 error_code);
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
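
For illustration (not part of this patch): the new static inlines added above
route through the single adjust_tsc_offset callback, with the bool selecting
guest- vs host-initiated adjustment. A minimal sketch of a hypothetical
caller, assuming kernel/KVM context.

	static void bump_guest_tsc_sketch(struct kvm_vcpu *vcpu, s64 delta)
	{
		/* guest-visible adjustment: passes host == false underneath */
		adjust_tsc_offset_guest(vcpu, delta);
	}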
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 9cdae5d47e8f..c8bed0da434a 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -3,7 +3,6 @@
 
 #include <linux/percpu.h>
 
-#include <asm/system.h>
 #include <linux/atomic.h>
 #include <asm/asm.h>
 
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index 0e8e85bb7c51..d354fb781c57 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -5,7 +5,6 @@
 #define _ASM_X86_MC146818RTC_H
 
 #include <asm/io.h>
-#include <asm/system.h>
 #include <asm/processor.h>
 #include <linux/mc146818rtc.h>
 
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 4365ffdb461f..7e3f17f92c66 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -29,18 +29,18 @@
 
 #define MTRR_IOCTL_BASE	'M'
 
-struct mtrr_sentry {
-	unsigned long base;    /* Base address     */
-	unsigned int size;     /* Size of region   */
-	unsigned int type;     /* Type of region   */
-};
-
 /* Warning: this structure has a different order from i386
    on x86-64. The 32bit emulation code takes care of that.
    But you need to use this for 64bit, otherwise your X server
    will break. */
 
 #ifdef __i386__
+struct mtrr_sentry {
+	unsigned long base;    /* Base address     */
+	unsigned int size;     /* Size of region   */
+	unsigned int type;     /* Type of region   */
+};
+
 struct mtrr_gentry {
 	unsigned int regnum;   /* Register number  */
 	unsigned long base;    /* Base address     */
@@ -50,12 +50,20 @@ struct mtrr_gentry {
 
 #else   /* __i386__ */
 
+struct mtrr_sentry {
+	__u64 base;    /* Base address     */
+	__u32 size;    /* Size of region   */
+	__u32 type;    /* Type of region   */
+};
+
 struct mtrr_gentry {
-	unsigned long base;    /* Base address     */
-	unsigned int size;     /* Size of region   */
-	unsigned int regnum;   /* Register number  */
-	unsigned int type;     /* Type of region   */
+	__u64 base;    /* Base address     */
+	__u32 size;    /* Size of region   */
+	__u32 regnum;  /* Register number  */
+	__u32 type;    /* Type of region   */
+	__u32 _pad;    /* Unused           */
 };
+
 #endif /* !__i386__ */
 
 struct mtrr_var_range {
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index bce688d54c12..e21fdd10479f 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -55,7 +55,6 @@ extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 
 extern void initmem_init(void);
-extern void free_initmem(void);
 
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c0180fd372d2..aa0f91308367 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -10,6 +10,7 @@
 #include <asm/paravirt_types.h>
 
 #ifndef __ASSEMBLY__
+#include <linux/bug.h>
 #include <linux/types.h>
 #include <linux/cpumask.h>
 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index e8fb2c7a5f4f..2291895b1836 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -23,6 +23,7 @@
 #define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
 #define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
 #define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
 #define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
 #define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
 #define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index bb7133dc155d..3427b7798dbc 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -7,7 +7,9 @@
 #else
 # ifdef __i386__
 #  include "posix_types_32.h"
-# else
+# elif defined(__LP64__)
 #  include "posix_types_64.h"
+# else
+#  include "posix_types_x32.h"
 # endif
 #endif
diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h
index f7d9adf82e53..99f262e04b91 100644
--- a/arch/x86/include/asm/posix_types_32.h
+++ b/arch/x86/include/asm/posix_types_32.h
@@ -7,79 +7,22 @@
  * assume GCC is being used.
  */
 
-typedef unsigned long	__kernel_ino_t;
 typedef unsigned short	__kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
 typedef unsigned short	__kernel_nlink_t;
-typedef long		__kernel_off_t;
-typedef int		__kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
 typedef unsigned short	__kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
 typedef unsigned short	__kernel_uid_t;
 typedef unsigned short	__kernel_gid_t;
-typedef unsigned int	__kernel_size_t;
-typedef int		__kernel_ssize_t;
-typedef int		__kernel_ptrdiff_t;
-typedef long		__kernel_time_t;
-typedef long		__kernel_suseconds_t;
-typedef long		__kernel_clock_t;
-typedef int		__kernel_timer_t;
-typedef int		__kernel_clockid_t;
-typedef int		__kernel_daddr_t;
-typedef char *		__kernel_caddr_t;
-typedef unsigned short	__kernel_uid16_t;
-typedef unsigned short	__kernel_gid16_t;
-typedef unsigned int	__kernel_uid32_t;
-typedef unsigned int	__kernel_gid32_t;
+#define __kernel_uid_t __kernel_uid_t
 
-typedef unsigned short	__kernel_old_uid_t;
-typedef unsigned short	__kernel_old_gid_t;
 typedef unsigned short	__kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
 
-#ifdef __GNUC__
-typedef long long	__kernel_loff_t;
-#endif
-
-typedef struct {
-	int	val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef	__FD_SET
-#define __FD_SET(fd,fdsetp)					\
-	asm volatile("btsl %1,%0":				\
-		     "+m" (*(__kernel_fd_set *)(fdsetp))	\
-		     : "r" ((int)(fd)))
-
-#undef	__FD_CLR
-#define __FD_CLR(fd,fdsetp)					\
-	asm volatile("btrl %1,%0":				\
-		     "+m" (*(__kernel_fd_set *)(fdsetp))	\
-		     : "r" ((int) (fd)))
-
-#undef	__FD_ISSET
-#define __FD_ISSET(fd,fdsetp)					\
-	(__extension__						\
-	 ({							\
-	 unsigned char	__result;				\
-	 asm volatile("btl %1,%2 ; setb %0"			\
-		      : "=q" (__result)				\
-		      : "r" ((int)(fd)),			\
-			"m" (*(__kernel_fd_set *)(fdsetp)));	\
-	 __result;						\
-}))
-
-#undef	__FD_ZERO
-#define __FD_ZERO(fdsetp)					\
-do {								\
-	int __d0, __d1;						\
-	asm volatile("cld ; rep ; stosl"			\
-		     : "=m" (*(__kernel_fd_set *)(fdsetp)),	\
-		       "=&c" (__d0), "=&D" (__d1)		\
-		     : "a" (0), "1" (__FDSET_LONGS),		\
-		       "2" ((__kernel_fd_set *)(fdsetp))	\
-		     : "memory");				\
-} while (0)
-
-#endif	/* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
 
 #endif /* _ASM_X86_POSIX_TYPES_32_H */
diff --git a/arch/x86/include/asm/posix_types_64.h b/arch/x86/include/asm/posix_types_64.h
index eb8d2d92b63e..cba0c1ead162 100644
--- a/arch/x86/include/asm/posix_types_64.h
+++ b/arch/x86/include/asm/posix_types_64.h
@@ -7,113 +7,13 @@
  * assume GCC is being used.
  */
 
-typedef unsigned long	__kernel_ino_t;
-typedef unsigned int	__kernel_mode_t;
-typedef unsigned long	__kernel_nlink_t;
-typedef long		__kernel_off_t;
-typedef int		__kernel_pid_t;
-typedef int		__kernel_ipc_pid_t;
-typedef unsigned int	__kernel_uid_t;
-typedef unsigned int	__kernel_gid_t;
-typedef unsigned long	__kernel_size_t;
-typedef long		__kernel_ssize_t;
-typedef long		__kernel_ptrdiff_t;
-typedef long		__kernel_time_t;
-typedef long		__kernel_suseconds_t;
-typedef long		__kernel_clock_t;
-typedef int		__kernel_timer_t;
-typedef int		__kernel_clockid_t;
-typedef int		__kernel_daddr_t;
-typedef char *		__kernel_caddr_t;
-typedef unsigned short	__kernel_uid16_t;
-typedef unsigned short	__kernel_gid16_t;
-
-#ifdef __GNUC__
-typedef long long	__kernel_loff_t;
-#endif
-
-typedef struct {
-	int	val[2];
-} __kernel_fsid_t;
-
 typedef unsigned short __kernel_old_uid_t;
 typedef unsigned short __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
 
 typedef unsigned long	__kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
 
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant cases (8 or 32 longs,
- * for 256 and 1024-bit fd_sets respectively)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *p)
-{
-	unsigned long *tmp = p->fds_bits;
-	int i;
-
-	if (__builtin_constant_p(__FDSET_LONGS)) {
-		switch (__FDSET_LONGS) {
-		case 32:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
-			tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
-			tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
-			tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
-			return;
-		case 16:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			return;
-		case 8:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			return;
-		case 4:
-			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			return;
-		}
-	}
-	i = __FDSET_LONGS;
-	while (i) {
-		i--;
-		*tmp = 0;
-		tmp++;
-	}
-}
-
-#endif	/* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
 
 #endif /* _ASM_X86_POSIX_TYPES_64_H */
diff --git a/arch/x86/include/asm/posix_types_x32.h b/arch/x86/include/asm/posix_types_x32.h
new file mode 100644
index 000000000000..85f9bdafa93c
--- /dev/null
+++ b/arch/x86/include/asm/posix_types_x32.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_X86_POSIX_TYPES_X32_H
+#define _ASM_X86_POSIX_TYPES_X32_H
+
+/*
+ * This file is only used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ *
+ * These types should generally match the ones used by the 64-bit kernel.
+ */
+
+typedef long long	__kernel_long_t;
+typedef unsigned long long __kernel_ulong_t;
+#define __kernel_long_t __kernel_long_t
+
+#include <asm/posix_types_64.h>
+
+#endif /* _ASM_X86_POSIX_TYPES_X32_H */
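
For illustration (not part of this patch): x32 is an ILP32 user-space ABI on
top of the 64-bit kernel, so __kernel_long_t stays 64 bits wide even though
user pointers are 32 bits. A hedged compile-time check of that assumption,
buildable only with an x32 toolchain (gcc -mx32); the local typedef name is
hypothetical.

	/* sketch only -- mirrors the typedef above under a local name */
	typedef long long x32_kernel_long_sketch;

	_Static_assert(sizeof(x32_kernel_long_sketch) == 8,
		       "kernel longs stay 64-bit under x32");
	_Static_assert(sizeof(void *) == 4,
		       "user pointers are 32-bit under x32 (-mx32)");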
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 95da14f7ee85..7284c9a6a0b5 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -14,13 +14,13 @@ struct mm_struct;
14#include <asm/sigcontext.h> 14#include <asm/sigcontext.h>
15#include <asm/current.h> 15#include <asm/current.h>
16#include <asm/cpufeature.h> 16#include <asm/cpufeature.h>
17#include <asm/system.h>
18#include <asm/page.h> 17#include <asm/page.h>
19#include <asm/pgtable_types.h> 18#include <asm/pgtable_types.h>
20#include <asm/percpu.h> 19#include <asm/percpu.h>
21#include <asm/msr.h> 20#include <asm/msr.h>
22#include <asm/desc_defs.h> 21#include <asm/desc_defs.h>
23#include <asm/nops.h> 22#include <asm/nops.h>
23#include <asm/special_insns.h>
24 24
25#include <linux/personality.h> 25#include <linux/personality.h>
26#include <linux/cpumask.h> 26#include <linux/cpumask.h>
@@ -29,6 +29,15 @@ struct mm_struct;
 #include <linux/math64.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/irqflags.h>
+
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be quite expensive on some Nehalem processors.
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN	0
 
 #define HBP_NUM 4
 /*
@@ -454,7 +463,7 @@ struct thread_struct {
 	unsigned long           ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
-	unsigned long		trap_no;
+	unsigned long		trap_nr;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
 	struct fpu		fpu;
@@ -475,61 +484,6 @@ struct thread_struct {
 	unsigned		io_bitmap_max;
 };
 
-static inline unsigned long native_get_debugreg(int regno)
-{
-	unsigned long val = 0;	/* Damn you, gcc! */
-
-	switch (regno) {
-	case 0:
-		asm("mov %%db0, %0" :"=r" (val));
-		break;
-	case 1:
-		asm("mov %%db1, %0" :"=r" (val));
-		break;
-	case 2:
-		asm("mov %%db2, %0" :"=r" (val));
-		break;
-	case 3:
-		asm("mov %%db3, %0" :"=r" (val));
-		break;
-	case 6:
-		asm("mov %%db6, %0" :"=r" (val));
-		break;
-	case 7:
-		asm("mov %%db7, %0" :"=r" (val));
-		break;
-	default:
-		BUG();
-	}
-	return val;
-}
-
-static inline void native_set_debugreg(int regno, unsigned long value)
-{
-	switch (regno) {
-	case 0:
-		asm("mov %0, %%db0"	::"r" (value));
-		break;
-	case 1:
-		asm("mov %0, %%db1"	::"r" (value));
-		break;
-	case 2:
-		asm("mov %0, %%db2"	::"r" (value));
-		break;
-	case 3:
-		asm("mov %0, %%db3"	::"r" (value));
-		break;
-	case 6:
-		asm("mov %0, %%db6"	::"r" (value));
-		break;
-	case 7:
-		asm("mov %0, %%db7"	::"r" (value));
-		break;
-	default:
-		BUG();
-	}
-}
-
 /*
  * Set IOPL bits in EFLAGS from given mask
  */
@@ -575,14 +529,6 @@ static inline void native_swapgs(void)
 #define __cpuid			native_cpuid
 #define paravirt_enabled()	0
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register)				\
-	(var) = native_get_debugreg(register)
-#define set_debugreg(value, register)				\
-	native_set_debugreg(register, value)
-
 static inline void load_sp0(struct tss_struct *tss,
 			    struct thread_struct *thread)
 {
@@ -927,9 +873,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
 					0xc0000000 : 0xFFFFe000)
 
-#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
+#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
+#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
 
 #define STACK_TOP		TASK_SIZE
@@ -951,6 +897,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);
+
+/*
+ * User space RSP while inside the SYSCALL fast path
+ */
+DECLARE_PER_CPU(unsigned long, old_rsp);
+
 #endif /* CONFIG_X86_64 */
 
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
@@ -1022,4 +974,24 @@ extern bool cpu_has_amd_erratum(const int *);
 #define cpu_has_amd_erratum(x)	(false)
 #endif /* CONFIG_CPU_SUP_AMD */
 
+#ifdef CONFIG_X86_32
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+#endif
+
+void disable_hlt(void);
+void enable_hlt(void);
+
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
+bool set_pm_idle_to_default(void);
+
+void stop_this_cpu(void *dummy);
+
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 35664547125b..dcfde52979c3 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -145,7 +145,6 @@ extern unsigned long
 convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 			 int error_code, int si_code);
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 
 extern long syscall_trace_enter(struct pt_regs *);
 extern void syscall_trace_leave(struct pt_regs *);
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 5e641715c3fe..165466233ab0 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -212,7 +212,61 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
-#endif
-#endif
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg, value)						\
+do {									\
+	unsigned short __val = (value);					\
+									\
+	asm volatile("						\n"	\
+		     "1:	movl %k0,%%" #seg "		\n"	\
+									\
+		     ".section .fixup,\"ax\"			\n"	\
+		     "2:	xorl %k0,%k0			\n"	\
+		     "		jmp 1b				\n"	\
+		     ".previous					\n"	\
+									\
+		     _ASM_EXTABLE(1b, 2b)				\
+									\
+		     : "+r" (__val) : : "memory");			\
+} while (0)
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value)				\
+	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
+
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk)	((tsk)->thread.gs)
+#define lazy_save_gs(v)		savesegment(gs, (v))
+#define lazy_load_gs(v)		loadsegment(gs, (v))
+#else	/* X86_32_LAZY_GS */
+#define get_user_gs(regs)	(u16)((regs)->gs)
+#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v)		do { } while (0)
+#define lazy_load_gs(v)		do { } while (0)
+#endif	/* X86_32_LAZY_GS */
+#endif	/* X86_32 */
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+	return __limit + 1;
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SEGMENT_H */
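
For illustration (not part of this patch): loadsegment() carries its own
exception-table fixup, so a faulting selector load degrades to loading the
null selector instead of crashing the kernel. A minimal sketch, assuming
kernel context; the function is hypothetical.

	static unsigned short swap_es_sketch(unsigned short new_sel)
	{
		unsigned short old_sel;

		savesegment(es, old_sel);	/* stash the current %es selector */
		loadsegment(es, new_sel);	/* a fault falls back to the null selector */
		return old_sel;
	}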
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 04459d25e66e..4a085383af27 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -230,34 +230,37 @@ struct sigcontext {
230 * User-space might still rely on the old definition: 230 * User-space might still rely on the old definition:
231 */ 231 */
232struct sigcontext { 232struct sigcontext {
233 unsigned long r8; 233 __u64 r8;
234 unsigned long r9; 234 __u64 r9;
235 unsigned long r10; 235 __u64 r10;
236 unsigned long r11; 236 __u64 r11;
237 unsigned long r12; 237 __u64 r12;
238 unsigned long r13; 238 __u64 r13;
239 unsigned long r14; 239 __u64 r14;
240 unsigned long r15; 240 __u64 r15;
241 unsigned long rdi; 241 __u64 rdi;
242 unsigned long rsi; 242 __u64 rsi;
243 unsigned long rbp; 243 __u64 rbp;
244 unsigned long rbx; 244 __u64 rbx;
245 unsigned long rdx; 245 __u64 rdx;
246 unsigned long rax; 246 __u64 rax;
247 unsigned long rcx; 247 __u64 rcx;
248 unsigned long rsp; 248 __u64 rsp;
249 unsigned long rip; 249 __u64 rip;
250 unsigned long eflags; /* RFLAGS */ 250 __u64 eflags; /* RFLAGS */
251 unsigned short cs; 251 __u16 cs;
252 unsigned short gs; 252 __u16 gs;
253 unsigned short fs; 253 __u16 fs;
254 unsigned short __pad0; 254 __u16 __pad0;
255 unsigned long err; 255 __u64 err;
256 unsigned long trapno; 256 __u64 trapno;
257 unsigned long oldmask; 257 __u64 oldmask;
258 unsigned long cr2; 258 __u64 cr2;
259 struct _fpstate __user *fpstate; /* zero when no FPU context */ 259 struct _fpstate __user *fpstate; /* zero when no FPU context */
260 unsigned long reserved1[8]; 260#ifndef __LP64__
261 __u32 __fpstate_pad;
262#endif
263 __u64 reserved1[8];
261}; 264};
262#endif /* !__KERNEL__ */ 265#endif /* !__KERNEL__ */
263 266
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
index 4e0fe26d27d3..7c7c27c97daa 100644
--- a/arch/x86/include/asm/sigframe.h
+++ b/arch/x86/include/asm/sigframe.h
@@ -59,12 +59,25 @@ struct rt_sigframe_ia32 {
59#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */ 59#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */
60 60
61#ifdef CONFIG_X86_64 61#ifdef CONFIG_X86_64
62
62struct rt_sigframe { 63struct rt_sigframe {
63 char __user *pretcode; 64 char __user *pretcode;
64 struct ucontext uc; 65 struct ucontext uc;
65 struct siginfo info; 66 struct siginfo info;
66 /* fp state follows here */ 67 /* fp state follows here */
67}; 68};
69
70#ifdef CONFIG_X86_X32_ABI
71
72struct rt_sigframe_x32 {
73 u64 pretcode;
74 struct ucontext_x32 uc;
75 compat_siginfo_t info;
76 /* fp state follows here */
77};
78
79#endif /* CONFIG_X86_X32_ABI */
80
68#endif /* CONFIG_X86_64 */ 81#endif /* CONFIG_X86_64 */
69 82
70#endif /* _ASM_X86_SIGFRAME_H */ 83#endif /* _ASM_X86_SIGFRAME_H */
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
new file mode 100644
index 000000000000..ada93b3b8c66
--- /dev/null
+++ b/arch/x86/include/asm/sighandling.h
@@ -0,0 +1,24 @@
1#ifndef _ASM_X86_SIGHANDLING_H
2#define _ASM_X86_SIGHANDLING_H
3
4#include <linux/compiler.h>
5#include <linux/ptrace.h>
6#include <linux/signal.h>
7
8#include <asm/processor-flags.h>
9
10#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
11
12#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
13 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
14 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
15 X86_EFLAGS_CF)
16
17void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
18
19int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
20 unsigned long *pax);
21int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
22 struct pt_regs *regs, unsigned long mask);
23
24#endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
new file mode 100644
index 000000000000..41fc93a2e225
--- /dev/null
+++ b/arch/x86/include/asm/special_insns.h
@@ -0,0 +1,199 @@
1#ifndef _ASM_X86_SPECIAL_INSNS_H
2#define _ASM_X86_SPECIAL_INSNS_H
3
4
5#ifdef __KERNEL__
6
7static inline void native_clts(void)
8{
9 asm volatile("clts");
10}
11
12/*
13 * Volatile isn't enough to prevent the compiler from reordering the
14 * read/write functions for the control registers and messing everything up.
15 * A memory clobber would solve the problem, but would prevent reordering of
16 * all loads stores around it, which can hurt performance. Solution is to
17 * use a variable and mimic reads and writes to it to enforce serialization
18 */
19static unsigned long __force_order;
20
21static inline unsigned long native_read_cr0(void)
22{
23 unsigned long val;
24 asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
25 return val;
26}
27
28static inline void native_write_cr0(unsigned long val)
29{
30 asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
31}
32
33static inline unsigned long native_read_cr2(void)
34{
35 unsigned long val;
36 asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
37 return val;
38}
39
40static inline void native_write_cr2(unsigned long val)
41{
42 asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
43}
44
45static inline unsigned long native_read_cr3(void)
46{
47 unsigned long val;
48 asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
49 return val;
50}
51
52static inline void native_write_cr3(unsigned long val)
53{
54 asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
55}
56
57static inline unsigned long native_read_cr4(void)
58{
59 unsigned long val;
60 asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
61 return val;
62}
63
64static inline unsigned long native_read_cr4_safe(void)
65{
66 unsigned long val;
67 /* This could fault if %cr4 does not exist. On x86_64, %cr4 always
68 * exists, so reading it will never fault. */
69#ifdef CONFIG_X86_32
70 asm volatile("1: mov %%cr4, %0\n"
71 "2:\n"
72 _ASM_EXTABLE(1b, 2b)
73 : "=r" (val), "=m" (__force_order) : "0" (0));
74#else
75 val = native_read_cr4();
76#endif
77 return val;
78}
79
80static inline void native_write_cr4(unsigned long val)
81{
82 asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
83}
84
85#ifdef CONFIG_X86_64
86static inline unsigned long native_read_cr8(void)
87{
88 unsigned long cr8;
89 asm volatile("movq %%cr8,%0" : "=r" (cr8));
90 return cr8;
91}
92
93static inline void native_write_cr8(unsigned long val)
94{
95 asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
96}
97#endif
98
99static inline void native_wbinvd(void)
100{
101 asm volatile("wbinvd": : :"memory");
102}
103
104extern void native_load_gs_index(unsigned);
105
106#ifdef CONFIG_PARAVIRT
107#include <asm/paravirt.h>
108#else
109
110static inline unsigned long read_cr0(void)
111{
112 return native_read_cr0();
113}
114
115static inline void write_cr0(unsigned long x)
116{
117 native_write_cr0(x);
118}
119
120static inline unsigned long read_cr2(void)
121{
122 return native_read_cr2();
123}
124
125static inline void write_cr2(unsigned long x)
126{
127 native_write_cr2(x);
128}
129
130static inline unsigned long read_cr3(void)
131{
132 return native_read_cr3();
133}
134
135static inline void write_cr3(unsigned long x)
136{
137 native_write_cr3(x);
138}
139
140static inline unsigned long read_cr4(void)
141{
142 return native_read_cr4();
143}
144
145static inline unsigned long read_cr4_safe(void)
146{
147 return native_read_cr4_safe();
148}
149
150static inline void write_cr4(unsigned long x)
151{
152 native_write_cr4(x);
153}
154
155static inline void wbinvd(void)
156{
157 native_wbinvd();
158}
159
160#ifdef CONFIG_X86_64
161
162static inline unsigned long read_cr8(void)
163{
164 return native_read_cr8();
165}
166
167static inline void write_cr8(unsigned long x)
168{
169 native_write_cr8(x);
170}
171
172static inline void load_gs_index(unsigned selector)
173{
174 native_load_gs_index(selector);
175}
176
177#endif
178
179/* Clear the 'TS' bit */
180static inline void clts(void)
181{
182 native_clts();
183}
184
185#endif /* CONFIG_PARAVIRT */
186
187#define stts() write_cr0(read_cr0() | X86_CR0_TS)
188
189static inline void clflush(volatile void *__p)
190{
191 asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
192}
193
194#define nop() asm volatile ("nop")
195
196
197#endif /* __KERNEL__ */
198
199#endif /* _ASM_X86_SPECIAL_INSNS_H */
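The __force_order idiom above gives every control-register accessor a fake store to the same variable, so the compiler keeps the accessors ordered against each other without paying for a full memory clobber. The pattern can be exercised from user space, substituting %rsp since control registers need ring 0 (x86-64 GCC or Clang assumed):

#include <stdio.h>

static unsigned long force_order;

static inline unsigned long read_sp(void)
{
	unsigned long val;
	/* the "=m" output makes each call look like a write to force_order,
	 * preventing the compiler from reordering these asm statements */
	asm volatile("mov %%rsp,%0" : "=r" (val), "=m" (force_order));
	return val;
}

int main(void)
{
	printf("sp = %#lx\n", read_sp());
	return 0;
}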
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 157517763565..b5d9533d2c38 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -38,7 +38,6 @@
38#include <asm/tsc.h> 38#include <asm/tsc.h>
39#include <asm/processor.h> 39#include <asm/processor.h>
40#include <asm/percpu.h> 40#include <asm/percpu.h>
41#include <asm/system.h>
42#include <asm/desc.h> 41#include <asm/desc.h>
43#include <linux/random.h> 42#include <linux/random.h>
44 43
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
new file mode 100644
index 000000000000..4ec45b3abba1
--- /dev/null
+++ b/arch/x86/include/asm/switch_to.h
@@ -0,0 +1,129 @@
1#ifndef _ASM_X86_SWITCH_TO_H
2#define _ASM_X86_SWITCH_TO_H
3
4struct task_struct; /* one of the stranger aspects of C forward declarations */
5struct task_struct *__switch_to(struct task_struct *prev,
6 struct task_struct *next);
7struct tss_struct;
8void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
9 struct tss_struct *tss);
10
11#ifdef CONFIG_X86_32
12
13#ifdef CONFIG_CC_STACKPROTECTOR
14#define __switch_canary \
15 "movl %P[task_canary](%[next]), %%ebx\n\t" \
16 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
17#define __switch_canary_oparam \
18 , [stack_canary] "=m" (stack_canary.canary)
19#define __switch_canary_iparam \
20 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
21#else /* CC_STACKPROTECTOR */
22#define __switch_canary
23#define __switch_canary_oparam
24#define __switch_canary_iparam
25#endif /* CC_STACKPROTECTOR */
26
27/*
28 * Saving eflags is important. It switches not only IOPL between tasks,
29 * it also protects other tasks from NT leaking through sysenter etc.
30 */
31#define switch_to(prev, next, last) \
32do { \
33 /* \
34 * Context-switching clobbers all registers, so we clobber \
35 * them explicitly, via unused output variables. \
36 * (EAX and EBP are not listed because EBP is saved/restored \
37 * explicitly for wchan access and EAX is the return value of \
38 * __switch_to()) \
39 */ \
40 unsigned long ebx, ecx, edx, esi, edi; \
41 \
42 asm volatile("pushfl\n\t" /* save flags */ \
43 "pushl %%ebp\n\t" /* save EBP */ \
44 "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
45 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
46 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
47 "pushl %[next_ip]\n\t" /* restore EIP */ \
48 __switch_canary \
49 "jmp __switch_to\n" /* regparm call */ \
50 "1:\t" \
51 "popl %%ebp\n\t" /* restore EBP */ \
52 "popfl\n" /* restore flags */ \
53 \
54 /* output parameters */ \
55 : [prev_sp] "=m" (prev->thread.sp), \
56 [prev_ip] "=m" (prev->thread.ip), \
57 "=a" (last), \
58 \
59 /* clobbered output registers: */ \
60 "=b" (ebx), "=c" (ecx), "=d" (edx), \
61 "=S" (esi), "=D" (edi) \
62 \
63 __switch_canary_oparam \
64 \
65 /* input parameters: */ \
66 : [next_sp] "m" (next->thread.sp), \
67 [next_ip] "m" (next->thread.ip), \
68 \
69 /* regparm parameters for __switch_to(): */ \
70 [prev] "a" (prev), \
71 [next] "d" (next) \
72 \
73 __switch_canary_iparam \
74 \
75 : /* reloaded segment registers */ \
76 "memory"); \
77} while (0)
78
79#else /* CONFIG_X86_32 */
80
81/* frame pointer must be last for get_wchan */
82#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
83#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
84
85#define __EXTRA_CLOBBER \
86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
87 "r12", "r13", "r14", "r15"
88
89#ifdef CONFIG_CC_STACKPROTECTOR
90#define __switch_canary \
91 "movq %P[task_canary](%%rsi),%%r8\n\t" \
92 "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
93#define __switch_canary_oparam \
94 , [gs_canary] "=m" (irq_stack_union.stack_canary)
95#define __switch_canary_iparam \
96 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
97#else /* CC_STACKPROTECTOR */
98#define __switch_canary
99#define __switch_canary_oparam
100#define __switch_canary_iparam
101#endif /* CC_STACKPROTECTOR */
102
103/* Save and restore flags to clear a leaking NT flag */
104#define switch_to(prev, next, last) \
105 asm volatile(SAVE_CONTEXT \
106 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
107 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
108 "call __switch_to\n\t" \
109 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
110 __switch_canary \
111 "movq %P[thread_info](%%rsi),%%r8\n\t" \
112 "movq %%rax,%%rdi\n\t" \
113 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
114 "jnz ret_from_fork\n\t" \
115 RESTORE_CONTEXT \
116 : "=a" (last) \
117 __switch_canary_oparam \
118 : [next] "S" (next), [prev] "D" (prev), \
119 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
120 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
121 [_tif_fork] "i" (_TIF_FORK), \
122 [thread_info] "i" (offsetof(struct task_struct, stack)), \
123 [current_task] "m" (current_task) \
124 __switch_canary_iparam \
125 : "memory", "cc" __EXTRA_CLOBBER)
126
127#endif /* CONFIG_X86_32 */
128
129#endif /* _ASM_X86_SWITCH_TO_H */
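Both switch_to() variants hand struct offsets to the asm as "i" (immediate) operands, so the field layout is baked into the instruction stream at compile time instead of being loaded through helpers. A stand-alone look at that offsetof pattern (struct names are stand-ins for task_struct):

#include <stdio.h>
#include <stddef.h>

struct thread { unsigned long flags; unsigned long sp; };
struct task   { long state; struct thread thread; };

int main(void)
{
	/* the constant below is what an operand such as
	 * [threadrsp] "i" (offsetof(struct task_struct, thread.sp))
	 * encodes directly into the mov instruction */
	printf("thread.sp lives at offset %zu\n",
	       offsetof(struct task, thread.sp));
	return 0;
}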
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index cb238526a9f1..3fda9db48819 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -10,6 +10,8 @@
10#ifndef _ASM_X86_SYS_IA32_H 10#ifndef _ASM_X86_SYS_IA32_H
11#define _ASM_X86_SYS_IA32_H 11#define _ASM_X86_SYS_IA32_H
12 12
13#ifdef CONFIG_COMPAT
14
13#include <linux/compiler.h> 15#include <linux/compiler.h>
14#include <linux/linkage.h> 16#include <linux/linkage.h>
15#include <linux/types.h> 17#include <linux/types.h>
@@ -36,8 +38,6 @@ asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
36 struct sigaction32 __user *, unsigned int); 38 struct sigaction32 __user *, unsigned int);
37asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, 39asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
38 struct old_sigaction32 __user *); 40 struct old_sigaction32 __user *);
39asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
40 compat_sigset_t __user *, unsigned int);
41asmlinkage long sys32_alarm(unsigned int); 41asmlinkage long sys32_alarm(unsigned int);
42 42
43asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); 43asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
@@ -83,4 +83,7 @@ asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
83 83
84asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, 84asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
85 const char __user *); 85 const char __user *);
86
87#endif /* CONFIG_COMPAT */
88
86#endif /* _ASM_X86_SYS_IA32_H */ 89#endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d962e5652a73..386b78686c4d 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -16,6 +16,7 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <asm/asm-offsets.h> /* For NR_syscalls */ 18#include <asm/asm-offsets.h> /* For NR_syscalls */
19#include <asm/unistd.h>
19 20
20extern const unsigned long sys_call_table[]; 21extern const unsigned long sys_call_table[];
21 22
@@ -26,13 +27,13 @@ extern const unsigned long sys_call_table[];
26 */ 27 */
27static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) 28static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
28{ 29{
29 return regs->orig_ax; 30 return regs->orig_ax & __SYSCALL_MASK;
30} 31}
31 32
32static inline void syscall_rollback(struct task_struct *task, 33static inline void syscall_rollback(struct task_struct *task,
33 struct pt_regs *regs) 34 struct pt_regs *regs)
34{ 35{
35 regs->ax = regs->orig_ax; 36 regs->ax = regs->orig_ax & __SYSCALL_MASK;
36} 37}
37 38
38static inline long syscall_get_error(struct task_struct *task, 39static inline long syscall_get_error(struct task_struct *task,
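With the x32 ABI enabled, the number in orig_ax may carry __X32_SYSCALL_BIT, and masking with __SYSCALL_MASK recovers the plain table index; on non-x32 kernels the mask is ~0 and the expression is a no-op. A quick check of the arithmetic:

#include <stdio.h>

#define __X32_SYSCALL_BIT 0x40000000
#define __SYSCALL_MASK    (~(__X32_SYSCALL_BIT))

int main(void)
{
	long orig_ax = __X32_SYSCALL_BIT | 1;	/* hypothetical x32 request */
	printf("raw nr %#lx -> table index %ld\n",
	       (unsigned long)orig_ax, orig_ax & __SYSCALL_MASK);
	return 0;
}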
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
deleted file mode 100644
index 2d2f01ce6dcb..000000000000
--- a/arch/x86/include/asm/system.h
+++ /dev/null
@@ -1,523 +0,0 @@
1#ifndef _ASM_X86_SYSTEM_H
2#define _ASM_X86_SYSTEM_H
3
4#include <asm/asm.h>
5#include <asm/segment.h>
6#include <asm/cpufeature.h>
7#include <asm/cmpxchg.h>
8#include <asm/nops.h>
9
10#include <linux/kernel.h>
11#include <linux/irqflags.h>
12
13/* entries in ARCH_DLINFO: */
14#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
15# define AT_VECTOR_SIZE_ARCH 2
16#else /* else it's non-compat x86-64 */
17# define AT_VECTOR_SIZE_ARCH 1
18#endif
19
20struct task_struct; /* one of the stranger aspects of C forward declarations */
21struct task_struct *__switch_to(struct task_struct *prev,
22 struct task_struct *next);
23struct tss_struct;
24void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
25 struct tss_struct *tss);
26extern void show_regs_common(void);
27
28#ifdef CONFIG_X86_32
29
30#ifdef CONFIG_CC_STACKPROTECTOR
31#define __switch_canary \
32 "movl %P[task_canary](%[next]), %%ebx\n\t" \
33 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
34#define __switch_canary_oparam \
35 , [stack_canary] "=m" (stack_canary.canary)
36#define __switch_canary_iparam \
37 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
38#else /* CC_STACKPROTECTOR */
39#define __switch_canary
40#define __switch_canary_oparam
41#define __switch_canary_iparam
42#endif /* CC_STACKPROTECTOR */
43
44/*
45 * Saving eflags is important. It switches not only IOPL between tasks,
46 * it also protects other tasks from NT leaking through sysenter etc.
47 */
48#define switch_to(prev, next, last) \
49do { \
50 /* \
51 * Context-switching clobbers all registers, so we clobber \
52 * them explicitly, via unused output variables. \
53 * (EAX and EBP are not listed because EBP is saved/restored \
54 * explicitly for wchan access and EAX is the return value of \
55 * __switch_to()) \
56 */ \
57 unsigned long ebx, ecx, edx, esi, edi; \
58 \
59 asm volatile("pushfl\n\t" /* save flags */ \
60 "pushl %%ebp\n\t" /* save EBP */ \
61 "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
62 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
63 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
64 "pushl %[next_ip]\n\t" /* restore EIP */ \
65 __switch_canary \
66 "jmp __switch_to\n" /* regparm call */ \
67 "1:\t" \
68 "popl %%ebp\n\t" /* restore EBP */ \
69 "popfl\n" /* restore flags */ \
70 \
71 /* output parameters */ \
72 : [prev_sp] "=m" (prev->thread.sp), \
73 [prev_ip] "=m" (prev->thread.ip), \
74 "=a" (last), \
75 \
76 /* clobbered output registers: */ \
77 "=b" (ebx), "=c" (ecx), "=d" (edx), \
78 "=S" (esi), "=D" (edi) \
79 \
80 __switch_canary_oparam \
81 \
82 /* input parameters: */ \
83 : [next_sp] "m" (next->thread.sp), \
84 [next_ip] "m" (next->thread.ip), \
85 \
86 /* regparm parameters for __switch_to(): */ \
87 [prev] "a" (prev), \
88 [next] "d" (next) \
89 \
90 __switch_canary_iparam \
91 \
92 : /* reloaded segment registers */ \
93 "memory"); \
94} while (0)
95
96/*
97 * disable hlt during certain critical i/o operations
98 */
99#define HAVE_DISABLE_HLT
100#else
101
102/* frame pointer must be last for get_wchan */
103#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
104#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
105
106#define __EXTRA_CLOBBER \
107 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
108 "r12", "r13", "r14", "r15"
109
110#ifdef CONFIG_CC_STACKPROTECTOR
111#define __switch_canary \
112 "movq %P[task_canary](%%rsi),%%r8\n\t" \
113 "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
114#define __switch_canary_oparam \
115 , [gs_canary] "=m" (irq_stack_union.stack_canary)
116#define __switch_canary_iparam \
117 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
118#else /* CC_STACKPROTECTOR */
119#define __switch_canary
120#define __switch_canary_oparam
121#define __switch_canary_iparam
122#endif /* CC_STACKPROTECTOR */
123
124/* Save and restore flags to clear a leaking NT flag */
125#define switch_to(prev, next, last) \
126 asm volatile(SAVE_CONTEXT \
127 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
128 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
129 "call __switch_to\n\t" \
130 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
131 __switch_canary \
132 "movq %P[thread_info](%%rsi),%%r8\n\t" \
133 "movq %%rax,%%rdi\n\t" \
134 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
135 "jnz ret_from_fork\n\t" \
136 RESTORE_CONTEXT \
137 : "=a" (last) \
138 __switch_canary_oparam \
139 : [next] "S" (next), [prev] "D" (prev), \
140 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
141 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
142 [_tif_fork] "i" (_TIF_FORK), \
143 [thread_info] "i" (offsetof(struct task_struct, stack)), \
144 [current_task] "m" (current_task) \
145 __switch_canary_iparam \
146 : "memory", "cc" __EXTRA_CLOBBER)
147#endif
148
149#ifdef __KERNEL__
150
151extern void native_load_gs_index(unsigned);
152
153/*
154 * Load a segment. Fall back on loading the zero
155 * segment if something goes wrong..
156 */
157#define loadsegment(seg, value) \
158do { \
159 unsigned short __val = (value); \
160 \
161 asm volatile(" \n" \
162 "1: movl %k0,%%" #seg " \n" \
163 \
164 ".section .fixup,\"ax\" \n" \
165 "2: xorl %k0,%k0 \n" \
166 " jmp 1b \n" \
167 ".previous \n" \
168 \
169 _ASM_EXTABLE(1b, 2b) \
170 \
171 : "+r" (__val) : : "memory"); \
172} while (0)
173
174/*
175 * Save a segment register away
176 */
177#define savesegment(seg, value) \
178 asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
179
180/*
181 * x86_32 user gs accessors.
182 */
183#ifdef CONFIG_X86_32
184#ifdef CONFIG_X86_32_LAZY_GS
185#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
186#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
187#define task_user_gs(tsk) ((tsk)->thread.gs)
188#define lazy_save_gs(v) savesegment(gs, (v))
189#define lazy_load_gs(v) loadsegment(gs, (v))
190#else /* X86_32_LAZY_GS */
191#define get_user_gs(regs) (u16)((regs)->gs)
192#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
193#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
194#define lazy_save_gs(v) do { } while (0)
195#define lazy_load_gs(v) do { } while (0)
196#endif /* X86_32_LAZY_GS */
197#endif /* X86_32 */
198
199static inline unsigned long get_limit(unsigned long segment)
200{
201 unsigned long __limit;
202 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
203 return __limit + 1;
204}
205
206static inline void native_clts(void)
207{
208 asm volatile("clts");
209}
210
211/*
212 * Volatile isn't enough to prevent the compiler from reordering the
213 * read/write functions for the control registers and messing everything up.
214 * A memory clobber would solve the problem, but would prevent reordering of
215 * all loads/stores around it, which can hurt performance. The solution is to
216 * use a variable and mimic reads and writes to it to enforce serialization.
217 */
218static unsigned long __force_order;
219
220static inline unsigned long native_read_cr0(void)
221{
222 unsigned long val;
223 asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
224 return val;
225}
226
227static inline void native_write_cr0(unsigned long val)
228{
229 asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
230}
231
232static inline unsigned long native_read_cr2(void)
233{
234 unsigned long val;
235 asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
236 return val;
237}
238
239static inline void native_write_cr2(unsigned long val)
240{
241 asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
242}
243
244static inline unsigned long native_read_cr3(void)
245{
246 unsigned long val;
247 asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
248 return val;
249}
250
251static inline void native_write_cr3(unsigned long val)
252{
253 asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
254}
255
256static inline unsigned long native_read_cr4(void)
257{
258 unsigned long val;
259 asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
260 return val;
261}
262
263static inline unsigned long native_read_cr4_safe(void)
264{
265 unsigned long val;
266 /* This could fault if %cr4 does not exist. On x86_64, %cr4 always
267 * exists, so reading it will never fault. */
268#ifdef CONFIG_X86_32
269 asm volatile("1: mov %%cr4, %0\n"
270 "2:\n"
271 _ASM_EXTABLE(1b, 2b)
272 : "=r" (val), "=m" (__force_order) : "0" (0));
273#else
274 val = native_read_cr4();
275#endif
276 return val;
277}
278
279static inline void native_write_cr4(unsigned long val)
280{
281 asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
282}
283
284#ifdef CONFIG_X86_64
285static inline unsigned long native_read_cr8(void)
286{
287 unsigned long cr8;
288 asm volatile("movq %%cr8,%0" : "=r" (cr8));
289 return cr8;
290}
291
292static inline void native_write_cr8(unsigned long val)
293{
294 asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
295}
296#endif
297
298static inline void native_wbinvd(void)
299{
300 asm volatile("wbinvd": : :"memory");
301}
302
303#ifdef CONFIG_PARAVIRT
304#include <asm/paravirt.h>
305#else
306
307static inline unsigned long read_cr0(void)
308{
309 return native_read_cr0();
310}
311
312static inline void write_cr0(unsigned long x)
313{
314 native_write_cr0(x);
315}
316
317static inline unsigned long read_cr2(void)
318{
319 return native_read_cr2();
320}
321
322static inline void write_cr2(unsigned long x)
323{
324 native_write_cr2(x);
325}
326
327static inline unsigned long read_cr3(void)
328{
329 return native_read_cr3();
330}
331
332static inline void write_cr3(unsigned long x)
333{
334 native_write_cr3(x);
335}
336
337static inline unsigned long read_cr4(void)
338{
339 return native_read_cr4();
340}
341
342static inline unsigned long read_cr4_safe(void)
343{
344 return native_read_cr4_safe();
345}
346
347static inline void write_cr4(unsigned long x)
348{
349 native_write_cr4(x);
350}
351
352static inline void wbinvd(void)
353{
354 native_wbinvd();
355}
356
357#ifdef CONFIG_X86_64
358
359static inline unsigned long read_cr8(void)
360{
361 return native_read_cr8();
362}
363
364static inline void write_cr8(unsigned long x)
365{
366 native_write_cr8(x);
367}
368
369static inline void load_gs_index(unsigned selector)
370{
371 native_load_gs_index(selector);
372}
373
374#endif
375
376/* Clear the 'TS' bit */
377static inline void clts(void)
378{
379 native_clts();
380}
381
382#endif /* CONFIG_PARAVIRT */
383
384#define stts() write_cr0(read_cr0() | X86_CR0_TS)
385
386#endif /* __KERNEL__ */
387
388static inline void clflush(volatile void *__p)
389{
390 asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
391}
392
393#define nop() asm volatile ("nop")
394
395void disable_hlt(void);
396void enable_hlt(void);
397
398void cpu_idle_wait(void);
399
400extern unsigned long arch_align_stack(unsigned long sp);
401extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
402
403void default_idle(void);
404bool set_pm_idle_to_default(void);
405
406void stop_this_cpu(void *dummy);
407
408/*
409 * Force strict CPU ordering.
410 * And yes, this is required on UP too when we're talking
411 * to devices.
412 */
413#ifdef CONFIG_X86_32
414/*
415 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
416 * nop for these.
417 */
418#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
419#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
420#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
421#else
422#define mb() asm volatile("mfence":::"memory")
423#define rmb() asm volatile("lfence":::"memory")
424#define wmb() asm volatile("sfence" ::: "memory")
425#endif
426
427/**
428 * read_barrier_depends - Flush all pending reads that subsequent reads
429 * depend on.
430 *
431 * No data-dependent reads from memory-like regions are ever reordered
432 * over this barrier. All reads preceding this primitive are guaranteed
433 * to access memory (but not necessarily other CPUs' caches) before any
434 * reads following this primitive that depend on the data returned by
435 * any of the preceding reads. This primitive is much lighter weight than
436 * rmb() on most CPUs, and is never heavier weight than
437 * rmb().
438 *
439 * These ordering constraints are respected by both the local CPU
440 * and the compiler.
441 *
442 * Ordering is not guaranteed by anything other than these primitives,
443 * not even by data dependencies. See the documentation for
444 * memory_barrier() for examples and URLs to more information.
445 *
446 * For example, the following code would force ordering (the initial
447 * value of "a" is zero, "b" is one, and "p" is "&a"):
448 *
449 * <programlisting>
450 * CPU 0 CPU 1
451 *
452 * b = 2;
453 * memory_barrier();
454 * p = &b; q = p;
455 * read_barrier_depends();
456 * d = *q;
457 * </programlisting>
458 *
459 * because the read of "*q" depends on the read of "p" and these
460 * two reads are separated by a read_barrier_depends(). However,
461 * the following code, with the same initial values for "a" and "b":
462 *
463 * <programlisting>
464 * CPU 0 CPU 1
465 *
466 * a = 2;
467 * memory_barrier();
468 * b = 3; y = b;
469 * read_barrier_depends();
470 * x = a;
471 * </programlisting>
472 *
473 * does not enforce ordering, since there is no data dependency between
474 * the read of "a" and the read of "b". Therefore, on some CPUs, such
475 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
476 * in cases like this where there are no data dependencies.
477 **/
478
479#define read_barrier_depends() do { } while (0)
480
481#ifdef CONFIG_SMP
482#define smp_mb() mb()
483#ifdef CONFIG_X86_PPRO_FENCE
484# define smp_rmb() rmb()
485#else
486# define smp_rmb() barrier()
487#endif
488#ifdef CONFIG_X86_OOSTORE
489# define smp_wmb() wmb()
490#else
491# define smp_wmb() barrier()
492#endif
493#define smp_read_barrier_depends() read_barrier_depends()
494#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
495#else
496#define smp_mb() barrier()
497#define smp_rmb() barrier()
498#define smp_wmb() barrier()
499#define smp_read_barrier_depends() do { } while (0)
500#define set_mb(var, value) do { var = value; barrier(); } while (0)
501#endif
502
503/*
504 * Stop RDTSC speculation. This is needed when you need to use RDTSC
505 * (or get_cycles or vread that possibly accesses the TSC) in a defined
506 * code region.
507 *
508 * (Could use a three-way alternative for this if there was one.)
509 */
510static __always_inline void rdtsc_barrier(void)
511{
512 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
513 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
514}
515
516/*
517 * We handle most unaligned accesses in hardware. On the other hand,
518 * unaligned DMA can be quite expensive on some Nehalem processors.
519 *
520 * Based on this we disable the IP header alignment in network drivers.
521 */
522#define NET_IP_ALIGN 0
523#endif /* _ASM_X86_SYSTEM_H */
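Most of system.h does not die with the file: per the diffstat, the barriers land in the new asm/barrier.h, switch_to() in asm/switch_to.h, and the control-register helpers in asm/special_insns.h. Of the pieces shown here, rdtsc_barrier() is worth a user-space approximation: it fences RDTSC so the read cannot be speculated into the timed region (LFENCE is used unconditionally below, where the kernel's alternatives pick LFENCE or MFENCE per CPU; x86-64 assumed):

#include <stdio.h>
#include <stdint.h>

static inline uint64_t rdtsc_ordered(void)
{
	uint32_t lo, hi;
	/* LFENCE makes preceding instructions complete before RDTSC issues */
	asm volatile("lfence\n\trdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("tsc = %llu\n", (unsigned long long)rdtsc_ordered());
	return 0;
}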
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index cfd8144d5527..ad6df8ccd715 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -86,7 +86,7 @@ struct thread_info {
86#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ 86#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
87#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ 87#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
88#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 88#define TIF_NOTSC 16 /* TSC is not accessible in userland */
89#define TIF_IA32 17 /* 32bit process */ 89#define TIF_IA32 17 /* IA32 compatibility process */
90#define TIF_FORK 18 /* ret_from_fork */ 90#define TIF_FORK 18 /* ret_from_fork */
91#define TIF_MEMDIE 20 /* is terminating due to OOM killer */ 91#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
92#define TIF_DEBUG 21 /* uses debug registers */ 92#define TIF_DEBUG 21 /* uses debug registers */
@@ -95,6 +95,8 @@ struct thread_info {
95#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ 95#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
96#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ 96#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
97#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ 97#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
98#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
99#define TIF_X32 30 /* 32-bit native x86-64 binary */
98 100
99#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 101#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
100#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 102#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -116,6 +118,8 @@ struct thread_info {
116#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) 118#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
117#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) 119#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
118#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) 120#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
121#define _TIF_ADDR32 (1 << TIF_ADDR32)
122#define _TIF_X32 (1 << TIF_X32)
119 123
120/* work to do in syscall_trace_enter() */ 124/* work to do in syscall_trace_enter() */
121#define _TIF_WORK_SYSCALL_ENTRY \ 125#define _TIF_WORK_SYSCALL_ENTRY \
@@ -262,6 +266,18 @@ static inline void set_restore_sigmask(void)
262 ti->status |= TS_RESTORE_SIGMASK; 266 ti->status |= TS_RESTORE_SIGMASK;
263 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); 267 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
264} 268}
269
270static inline bool is_ia32_task(void)
271{
272#ifdef CONFIG_X86_32
273 return true;
274#endif
275#ifdef CONFIG_IA32_EMULATION
276 if (current_thread_info()->status & TS_COMPAT)
277 return true;
278#endif
279 return false;
280}
265#endif /* !__ASSEMBLY__ */ 281#endif /* !__ASSEMBLY__ */
266 282
267#ifndef __ASSEMBLY__ 283#ifndef __ASSEMBLY__
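The new TIF_ADDR32/TIF_X32 pair follows the existing convention of a bit index plus a _TIF_* mask, so testing a task's ABI is plain bit arithmetic, as in this stand-alone sketch:

#include <stdio.h>

#define TIF_ADDR32	29
#define TIF_X32		30
#define _TIF_ADDR32	(1 << TIF_ADDR32)
#define _TIF_X32	(1 << TIF_X32)

int main(void)
{
	unsigned long flags = _TIF_X32 | _TIF_ADDR32;	/* hypothetical x32 task */
	printf("x32 binary: %s, 32-bit address space: %s\n",
	       (flags & _TIF_X32) ? "yes" : "no",
	       (flags & _TIF_ADDR32) ? "yes" : "no");
	return 0;
}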
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 169be8938b96..c0e108e08079 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,7 +5,7 @@
5#include <linux/sched.h> 5#include <linux/sched.h>
6 6
7#include <asm/processor.h> 7#include <asm/processor.h>
8#include <asm/system.h> 8#include <asm/special_insns.h>
9 9
10#ifdef CONFIG_PARAVIRT 10#ifdef CONFIG_PARAVIRT
11#include <asm/paravirt.h> 11#include <asm/paravirt.h>
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 0012d0902c5f..88eae2aec619 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -89,4 +89,29 @@ asmlinkage void smp_thermal_interrupt(void);
89asmlinkage void mce_threshold_interrupt(void); 89asmlinkage void mce_threshold_interrupt(void);
90#endif 90#endif
91 91
92/* Interrupts/Exceptions */
93enum {
94 X86_TRAP_DE = 0, /* 0, Divide-by-zero */
95 X86_TRAP_DB, /* 1, Debug */
96 X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
97 X86_TRAP_BP, /* 3, Breakpoint */
98 X86_TRAP_OF, /* 4, Overflow */
99 X86_TRAP_BR, /* 5, Bound Range Exceeded */
100 X86_TRAP_UD, /* 6, Invalid Opcode */
101 X86_TRAP_NM, /* 7, Device Not Available */
102 X86_TRAP_DF, /* 8, Double Fault */
103 X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
104 X86_TRAP_TS, /* 10, Invalid TSS */
105 X86_TRAP_NP, /* 11, Segment Not Present */
106 X86_TRAP_SS, /* 12, Stack Segment Fault */
107 X86_TRAP_GP, /* 13, General Protection Fault */
108 X86_TRAP_PF, /* 14, Page Fault */
109 X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
110 X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
111 X86_TRAP_AC, /* 17, Alignment Check */
112 X86_TRAP_MC, /* 18, Machine Check */
113 X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
114 X86_TRAP_IRET = 32, /* 32, IRET Exception */
115};
116
92#endif /* _ASM_X86_TRAPS_H */ 117#endif /* _ASM_X86_TRAPS_H */
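The enum names the hardware exception vectors in architectural order, letting handlers test X86_TRAP_* instead of bare numbers. A sketch of mapping vectors back to printable names (table abbreviated to a few entries):

#include <stdio.h>

enum { X86_TRAP_DE = 0, X86_TRAP_DB, X86_TRAP_NMI, X86_TRAP_BP };

static const char *trap_names[] = {
	[X86_TRAP_DE]  = "divide error",
	[X86_TRAP_DB]  = "debug",
	[X86_TRAP_NMI] = "non-maskable interrupt",
	[X86_TRAP_BP]  = "breakpoint",
};

int main(void)
{
	int vector = X86_TRAP_BP;	/* e.g. taken from an exception frame */
	printf("vector %d: %s\n", vector, trap_names[vector]);
	return 0;
}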
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 15d99153a96d..c91e8b9d588b 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -61,7 +61,7 @@ extern void check_tsc_sync_source(int cpu);
61extern void check_tsc_sync_target(void); 61extern void check_tsc_sync_target(void);
62 62
63extern int notsc_setup(char *); 63extern int notsc_setup(char *);
64extern void save_sched_clock_state(void); 64extern void tsc_save_sched_clock_state(void);
65extern void restore_sched_clock_state(void); 65extern void tsc_restore_sched_clock_state(void);
66 66
67#endif /* _ASM_X86_TSC_H */ 67#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 21f77b89e47a..37cdc9d99bb1 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -1,7 +1,17 @@
1#ifndef _ASM_X86_UNISTD_H 1#ifndef _ASM_X86_UNISTD_H
2#define _ASM_X86_UNISTD_H 1 2#define _ASM_X86_UNISTD_H 1
3 3
4/* x32 syscall flag bit */
5#define __X32_SYSCALL_BIT 0x40000000
6
4#ifdef __KERNEL__ 7#ifdef __KERNEL__
8
9# ifdef CONFIG_X86_X32_ABI
10# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
11# else
12# define __SYSCALL_MASK (~0)
13# endif
14
5# ifdef CONFIG_X86_32 15# ifdef CONFIG_X86_32
6 16
7# include <asm/unistd_32.h> 17# include <asm/unistd_32.h>
@@ -14,6 +24,7 @@
14# else 24# else
15 25
16# include <asm/unistd_64.h> 26# include <asm/unistd_64.h>
27# include <asm/unistd_64_x32.h>
17# define __ARCH_WANT_COMPAT_SYS_TIME 28# define __ARCH_WANT_COMPAT_SYS_TIME
18 29
19# endif 30# endif
@@ -52,8 +63,10 @@
52#else 63#else
53# ifdef __i386__ 64# ifdef __i386__
54# include <asm/unistd_32.h> 65# include <asm/unistd_32.h>
55# else 66# elif defined(__LP64__)
56# include <asm/unistd_64.h> 67# include <asm/unistd_64.h>
68# else
69# include <asm/unistd_x32.h>
57# endif 70# endif
58#endif 71#endif
59 72
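For user space, the exported header now picks the syscall list from the compiler target: __i386__ selects the 32-bit table, __LP64__ the 64-bit one, and the remaining case (a 64-bit target with 32-bit longs) is x32. The same three-way test can be reproduced directly:

#include <stdio.h>

int main(void)
{
#if defined(__i386__)
	puts("ia32 target -> unistd_32.h");
#elif defined(__LP64__)
	puts("x86-64 target -> unistd_64.h");
#else
	puts("x32 target -> unistd_x32.h");
#endif
	return 0;
}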
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 815285bcaceb..8b38be2de9e1 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,13 +5,8 @@
5#include <linux/clocksource.h> 5#include <linux/clocksource.h>
6 6
7struct vsyscall_gtod_data { 7struct vsyscall_gtod_data {
8 seqlock_t lock; 8 seqcount_t seq;
9 9
10 /* open coded 'struct timespec' */
11 time_t wall_time_sec;
12 u32 wall_time_nsec;
13
14 struct timezone sys_tz;
15 struct { /* extract of a clocksource struct */ 10 struct { /* extract of a clocksource struct */
16 int vclock_mode; 11 int vclock_mode;
17 cycle_t cycle_last; 12 cycle_t cycle_last;
@@ -19,8 +14,16 @@ struct vsyscall_gtod_data {
19 u32 mult; 14 u32 mult;
20 u32 shift; 15 u32 shift;
21 } clock; 16 } clock;
22 struct timespec wall_to_monotonic; 17
18 /* open coded 'struct timespec' */
19 time_t wall_time_sec;
20 u32 wall_time_nsec;
21 u32 monotonic_time_nsec;
22 time_t monotonic_time_sec;
23
24 struct timezone sys_tz;
23 struct timespec wall_time_coarse; 25 struct timespec wall_time_coarse;
26 struct timespec monotonic_time_coarse;
24}; 27};
25extern struct vsyscall_gtod_data vsyscall_gtod_data; 28extern struct vsyscall_gtod_data vsyscall_gtod_data;
26 29
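Replacing the seqlock_t with a bare seqcount_t suits the vsyscall page: user-space readers can only spin on a sequence number, never take the writer's spinlock. A generic C11 sketch of that read-retry discipline (field names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

struct gtod {
	atomic_uint seq;
	long wall_sec, wall_nsec;
};

static void gtod_read(struct gtod *g, long *sec, long *nsec)
{
	unsigned int s;
	do {
		s = atomic_load_explicit(&g->seq, memory_order_acquire);
		*sec  = g->wall_sec;
		*nsec = g->wall_nsec;
		atomic_thread_fence(memory_order_acquire);
		/* odd seq: writer active; changed seq: retry the torn read */
	} while ((s & 1) ||
		 s != atomic_load_explicit(&g->seq, memory_order_relaxed));
}

int main(void)
{
	struct gtod g = { .wall_sec = 1, .wall_nsec = 500 };
	long sec, nsec;

	gtod_read(&g, &sec, &nsec);
	printf("%ld.%09ld\n", sec, nsec);
	return 0;
}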
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index e0f9aa16358b..5da71c27cc59 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -16,7 +16,6 @@
16#define _ASM_X86_VIRTEX_H 16#define _ASM_X86_VIRTEX_H
17 17
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/system.h>
20 19
21#include <asm/vmx.h> 20#include <asm/vmx.h>
22#include <asm/svm.h> 21#include <asm/svm.h>
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 517d4767ffdd..baaca8defec8 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -145,9 +145,11 @@ struct x86_init_ops {
145/** 145/**
146 * struct x86_cpuinit_ops - platform specific cpu hotplug setups 146 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
147 * @setup_percpu_clockev: set up the per cpu clock event device 147 * @setup_percpu_clockev: set up the per cpu clock event device
148 * @early_percpu_clock_init: early init of the per cpu clock event device
148 */ 149 */
149struct x86_cpuinit_ops { 150struct x86_cpuinit_ops {
150 void (*setup_percpu_clockev)(void); 151 void (*setup_percpu_clockev)(void);
152 void (*early_percpu_clock_init)(void);
151 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); 153 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
152}; 154};
153 155
@@ -160,6 +162,8 @@ struct x86_cpuinit_ops {
160 * @is_untracked_pat_range exclude from PAT logic 162 * @is_untracked_pat_range exclude from PAT logic
161 * @nmi_init enable NMI on cpus 163 * @nmi_init enable NMI on cpus
162 * @i8042_detect pre-detect if i8042 controller exists 164 * @i8042_detect pre-detect if i8042 controller exists
165 * @save_sched_clock_state: save state for sched_clock() on suspend
166 * @restore_sched_clock_state: restore state for sched_clock() on resume
163 */ 167 */
164struct x86_platform_ops { 168struct x86_platform_ops {
165 unsigned long (*calibrate_tsc)(void); 169 unsigned long (*calibrate_tsc)(void);
@@ -171,6 +175,8 @@ struct x86_platform_ops {
171 void (*nmi_init)(void); 175 void (*nmi_init)(void);
172 unsigned char (*get_nmi_reason)(void); 176 unsigned char (*get_nmi_reason)(void);
173 int (*i8042_detect)(void); 177 int (*i8042_detect)(void);
178 void (*save_sched_clock_state)(void);
179 void (*restore_sched_clock_state)(void);
174}; 180};
175 181
176struct pci_dev; 182struct pci_dev;
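The two sched_clock hooks join x86_platform as overridable function pointers, matching the tsc.h rename above: the TSC code supplies the defaults and a platform (e.g. a hypervisor) can substitute its own without touching the suspend/resume callers. A minimal sketch of the ops-struct pattern with stand-in names:

#include <stdio.h>

struct platform_ops {
	void (*save_sched_clock_state)(void);
	void (*restore_sched_clock_state)(void);
};

static void tsc_save(void)    { puts("saving sched_clock state"); }
static void tsc_restore(void) { puts("restoring sched_clock state"); }

/* default to the TSC implementation; a platform may overwrite these */
static struct platform_ops x86_platform = {
	.save_sched_clock_state    = tsc_save,
	.restore_sched_clock_state = tsc_restore,
};

int main(void)
{
	x86_platform.save_sched_clock_state();		/* suspend path */
	x86_platform.restore_sched_clock_state();	/* resume path */
	return 0;
}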
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index a1f2db5f1170..cbf0c9d50b92 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -56,6 +56,7 @@ DEFINE_GUEST_HANDLE(int);
56DEFINE_GUEST_HANDLE(long); 56DEFINE_GUEST_HANDLE(long);
57DEFINE_GUEST_HANDLE(void); 57DEFINE_GUEST_HANDLE(void);
58DEFINE_GUEST_HANDLE(uint64_t); 58DEFINE_GUEST_HANDLE(uint64_t);
59DEFINE_GUEST_HANDLE(uint32_t);
59#endif 60#endif
60 61
61#ifndef HYPERVISOR_VIRT_START 62#ifndef HYPERVISOR_VIRT_START