Diffstat (limited to 'include/asm-x86')
-rw-r--r-- include/asm-x86/apic.h | 4
-rw-r--r-- include/asm-x86/asm.h | 40
-rw-r--r-- include/asm-x86/atomic_64.h | 6
-rw-r--r-- include/asm-x86/bitops.h | 68
-rw-r--r-- include/asm-x86/cpufeature.h | 4
-rw-r--r-- include/asm-x86/current.h | 42
-rw-r--r-- include/asm-x86/current_32.h | 17
-rw-r--r-- include/asm-x86/current_64.h | 27
-rw-r--r-- include/asm-x86/e820.h | 1
-rw-r--r-- include/asm-x86/fixmap_32.h | 8
-rw-r--r-- include/asm-x86/gart.h | 63
-rw-r--r-- include/asm-x86/geode.h | 4
-rw-r--r-- include/asm-x86/hardirq.h | 6
-rw-r--r-- include/asm-x86/i387.h | 2
-rw-r--r-- include/asm-x86/i8259.h | 2
-rw-r--r-- include/asm-x86/io_apic.h | 13
-rw-r--r-- include/asm-x86/kvm_host.h | 4
-rw-r--r-- include/asm-x86/kvm_para.h | 33
-rw-r--r-- include/asm-x86/mach-default/smpboot_hooks.h | 4
-rw-r--r-- include/asm-x86/mmconfig.h | 12
-rw-r--r-- include/asm-x86/msr-index.h | 4
-rw-r--r-- include/asm-x86/msr.h | 2
-rw-r--r-- include/asm-x86/nmi.h | 27
-rw-r--r-- include/asm-x86/page.h | 11
-rw-r--r-- include/asm-x86/page_32.h | 11
-rw-r--r-- include/asm-x86/paravirt.h | 43
-rw-r--r-- include/asm-x86/pat.h | 8
-rw-r--r-- include/asm-x86/pci.h | 2
-rw-r--r-- include/asm-x86/pci_32.h | 14
-rw-r--r-- include/asm-x86/pgtable.h | 72
-rw-r--r-- include/asm-x86/processor-flags.h | 6
-rw-r--r-- include/asm-x86/processor.h | 6
-rw-r--r-- include/asm-x86/ptrace.h | 8
-rw-r--r-- include/asm-x86/pvclock-abi.h | 42
-rw-r--r-- include/asm-x86/pvclock.h | 13
-rw-r--r-- include/asm-x86/reboot.h | 2
-rw-r--r-- include/asm-x86/required-features.h | 6
-rw-r--r-- include/asm-x86/resume-trace.h | 2
-rw-r--r-- include/asm-x86/seccomp_32.h | 1
-rw-r--r-- include/asm-x86/seccomp_64.h | 1
-rw-r--r-- include/asm-x86/string_32.h | 323
-rw-r--r-- include/asm-x86/suspend_32.h | 5
-rw-r--r-- include/asm-x86/system.h | 2
-rw-r--r-- include/asm-x86/thread_info.h | 248
-rw-r--r-- include/asm-x86/thread_info_32.h | 205
-rw-r--r-- include/asm-x86/thread_info_64.h | 195
-rw-r--r-- include/asm-x86/tlbflush.h | 13
-rw-r--r-- include/asm-x86/unistd_64.h | 2
-rw-r--r-- include/asm-x86/vm86.h | 11
-rw-r--r-- include/asm-x86/xen/hypercall.h | 11
-rw-r--r-- include/asm-x86/xen/page.h | 29
-rw-r--r-- include/asm-x86/xor_32.h | 5
-rw-r--r-- include/asm-x86/xor_64.h | 5
53 files changed, 908 insertions(+), 787 deletions(-)
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index be9639a9a186..313bcaf4b6c3 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -36,14 +36,10 @@ extern void generic_apic_probe(void);
 #ifdef CONFIG_X86_LOCAL_APIC
 
 extern int apic_verbosity;
-extern int timer_over_8254;
 extern int local_apic_timer_c2_ok;
-extern int local_apic_timer_disabled;
 
-extern int apic_runs_main_timer;
 extern int ioapic_force;
 extern int disable_apic;
-extern int disable_apic_timer;
 
 /*
  * Basic functions accessing APICs.
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 90dec0c23646..70939820c55f 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -1,33 +1,29 @@
 #ifndef _ASM_X86_ASM_H
 #define _ASM_X86_ASM_H
 
-#ifdef CONFIG_X86_32
-/* 32 bits */
-
-# define _ASM_PTR	" .long "
-# define _ASM_ALIGN	" .balign 4 "
-# define _ASM_MOV_UL	" movl "
-
-# define _ASM_INC	" incl "
-# define _ASM_DEC	" decl "
-# define _ASM_ADD	" addl "
-# define _ASM_SUB	" subl "
-# define _ASM_XADD	" xaddl "
-
-#else
-/* 64 bits */
-
-# define _ASM_PTR	" .quad "
-# define _ASM_ALIGN	" .balign 8 "
-# define _ASM_MOV_UL	" movq "
-
-# define _ASM_INC	" incq "
-# define _ASM_DEC	" decq "
-# define _ASM_ADD	" addq "
-# define _ASM_SUB	" subq "
-# define _ASM_XADD	" xaddq "
-
-#endif /* CONFIG_X86_32 */
+#ifdef __ASSEMBLY__
+# define __ASM_FORM(x)	x
+#else
+# define __ASM_FORM(x)	" " #x " "
+#endif
+
+#ifdef CONFIG_X86_32
+# define __ASM_SEL(a,b) __ASM_FORM(a)
+#else
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+#endif
+
+#define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
+
+#define _ASM_PTR	__ASM_SEL(.long, .quad)
+#define _ASM_ALIGN	__ASM_SEL(.balign 4, .balign 8)
+#define _ASM_MOV_UL	__ASM_SIZE(mov)
+
+#define _ASM_INC	__ASM_SIZE(inc)
+#define _ASM_DEC	__ASM_SIZE(dec)
+#define _ASM_ADD	__ASM_SIZE(add)
+#define _ASM_SUB	__ASM_SIZE(sub)
+#define _ASM_XADD	__ASM_SIZE(xadd)
 
 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
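A reading aid, not part of the commit: the __ASM_FORM()/__ASM_SEL() machinery above can be exercised from plain C. A minimal sketch (the CONFIG_X86_32 define stands in for the kernel config; without it, the 64-bit argument of __ASM_SEL() wins):

	#include <stdio.h>

	#ifdef __ASSEMBLY__
	# define __ASM_FORM(x)	x
	#else
	# define __ASM_FORM(x)	" " #x " "
	#endif

	#ifdef CONFIG_X86_32
	# define __ASM_SEL(a, b)	__ASM_FORM(a)
	#else
	# define __ASM_SEL(a, b)	__ASM_FORM(b)
	#endif

	#define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
	#define _ASM_PTR		__ASM_SEL(.long, .quad)

	int main(void)
	{
		/* Without -DCONFIG_X86_32 the 64-bit variants are selected: */
		printf("_ASM_PTR = '%s'\n", _ASM_PTR);		/* ' .quad ' */
		printf("_ASM_ADD = '%s'\n", __ASM_SIZE(add));	/* ' addq ' */
		return 0;
	}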
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 3e0cd7d38335..fe589c153db8 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -11,12 +11,6 @@
  * resource counting etc..
  */
 
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index ee4b3ead6a43..96b1829cea15 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -23,11 +23,21 @@
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
 /* Technically wrong, but this avoids compilation errors on some gcc
    versions. */
-#define ADDR "=m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
 #else
-#define ADDR "+m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
 #endif
 
+#define ADDR			BITOP_ADDR(addr)
+
+/*
+ * We do the locked ops that don't return the old value as
+ * a mask operation on a byte.
+ */
+#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
+#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK(nr)			(1 << ((nr) & 7))
+
 /**
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -43,9 +53,17 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
 {
-	asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+	if (IS_IMMEDIATE(nr)) {
+		asm volatile(LOCK_PREFIX "orb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((u8)CONST_MASK(nr))
+			: "memory");
+	} else {
+		asm volatile(LOCK_PREFIX "bts %1,%0"
+			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+	}
 }
 
 /**
@@ -57,7 +75,7 @@ static inline void set_bit(int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -72,9 +90,17 @@ static inline void __set_bit(int nr, volatile void *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
 {
-	asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
+	if (IS_IMMEDIATE(nr)) {
+		asm volatile(LOCK_PREFIX "andb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((u8)~CONST_MASK(nr)));
+	} else {
+		asm volatile(LOCK_PREFIX "btr %1,%0"
+			: BITOP_ADDR(addr)
+			: "Ir" (nr));
+	}
 }
 
 /*
@@ -85,13 +111,13 @@ static inline void clear_bit(int nr, volatile void *addr)
  * clear_bit() is atomic and implies release semantics before the memory
  * operation. It can be used for an unlock.
  */
-static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 {
 	barrier();
 	clear_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
@@ -108,7 +134,7 @@ static inline void __clear_bit(int nr, volatile void *addr)
  * No memory barrier is required here, because x86 cannot reorder stores past
  * older loads. Same principle as spin_unlock.
  */
-static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 {
 	barrier();
 	__clear_bit(nr, addr);
@@ -126,7 +152,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -140,7 +166,7 @@ static inline void __change_bit(int nr, volatile void *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(int nr, volatile void *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -153,7 +179,7 @@ static inline void change_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -170,7 +196,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile void *addr)
+static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
@@ -184,7 +210,7 @@ static inline int test_and_set_bit_lock(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile void *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -203,7 +229,7 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -223,7 +249,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -235,7 +261,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -255,7 +281,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -266,13 +292,13 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 	return oldbit;
 }
 
-static inline int constant_test_bit(int nr, const volatile void *addr)
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
 }
 
-static inline int variable_test_bit(int nr, volatile const void *addr)
+static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
 {
 	int oldbit;
 
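Editorial illustration of the new constant-bit fast path: for a compile-time-constant nr, set_bit() now becomes a one-byte orb on the byte containing the bit. The mask arithmetic, checked in userspace (the little-endian assumption is noted in the comment):

	#include <stdio.h>

	#define CONST_MASK(nr)	(1 << ((nr) & 7))

	int main(void)
	{
		unsigned long word = 0;
		unsigned char *bytes = (unsigned char *)&word;
		int nr = 13;

		/* Byte offset nr >> 3 == 1; in-byte mask 1 << (13 & 7) == 0x20. */
		bytes[nr >> 3] |= CONST_MASK(nr);

		/* On little-endian x86 this equals word |= 1UL << 13. */
		printf("word = %#lx\n", word);	/* prints 0x2000 */
		return 0;
	}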
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 0d609c837a41..78b47e7404eb 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -142,11 +142,11 @@ extern const char * const x86_power_flags[32];
 #define clear_cpu_cap(c, bit)	clear_bit(bit, (unsigned long *)((c)->x86_capability))
 #define setup_clear_cpu_cap(bit) do { \
 	clear_cpu_cap(&boot_cpu_data, bit);	\
-	set_bit(bit, cleared_cpu_caps); \
+	set_bit(bit, (unsigned long *)cleared_cpu_caps); \
 } while (0)
 #define setup_force_cpu_cap(bit) do { \
 	set_cpu_cap(&boot_cpu_data, bit);	\
-	clear_bit(bit, cleared_cpu_caps); \
+	clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
 } while (0)
 
 #define cpu_has_fpu	boot_cpu_has(X86_FEATURE_FPU)
diff --git a/include/asm-x86/current.h b/include/asm-x86/current.h
index d2526d3f7346..7515c19d4988 100644
--- a/include/asm-x86/current.h
+++ b/include/asm-x86/current.h
@@ -1,5 +1,39 @@
+#ifndef _X86_CURRENT_H
+#define _X86_CURRENT_H
+
 #ifdef CONFIG_X86_32
-# include "current_32.h"
-#else
-# include "current_64.h"
-#endif
+#include <linux/compiler.h>
+#include <asm/percpu.h>
+
+struct task_struct;
+
+DECLARE_PER_CPU(struct task_struct *, current_task);
+static __always_inline struct task_struct *get_current(void)
+{
+	return x86_read_percpu(current_task);
+}
+
+#else /* X86_32 */
+
+#ifndef __ASSEMBLY__
+#include <asm/pda.h>
+
+struct task_struct;
+
+static __always_inline struct task_struct *get_current(void)
+{
+	return read_pda(pcurrent);
+}
+
+#else /* __ASSEMBLY__ */
+
+#include <asm/asm-offsets.h>
+#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* X86_32 */
+
+#define current get_current()
+
+#endif /* X86_CURRENT_H */
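Editorial sketch: the unified header reads `current` through a per-cpu variable (32-bit) or the PDA (64-bit). A userspace analogue using a per-thread pointer, with hypothetical toy names, shows the shape of the API:

	#include <stdio.h>

	struct task_struct { int pid; };		/* toy stand-in */

	static __thread struct task_struct *current_task; /* per-thread ~ per-cpu */

	static inline struct task_struct *get_current(void)
	{
		return current_task;
	}
	#define current get_current()

	int main(void)
	{
		struct task_struct me = { .pid = 42 };
		current_task = &me;
		printf("current->pid = %d\n", current->pid);
		return 0;
	}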
diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h
deleted file mode 100644
index 5af9bdb97a16..000000000000
--- a/include/asm-x86/current_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _I386_CURRENT_H
-#define _I386_CURRENT_H
-
-#include <linux/compiler.h>
-#include <asm/percpu.h>
-
-struct task_struct;
-
-DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
-	return x86_read_percpu(current_task);
-}
-
-#define current get_current()
-
-#endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h
deleted file mode 100644
index 2d368ede2fc1..000000000000
--- a/include/asm-x86/current_64.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _X86_64_CURRENT_H
-#define _X86_64_CURRENT_H
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
-
-#include <asm/pda.h>
-
-static inline struct task_struct *get_current(void)
-{
-	struct task_struct *t = read_pda(pcurrent);
-	return t;
-}
-
-#define current get_current()
-
-#else
-
-#ifndef ASM_OFFSET_H
-#include <asm/asm-offsets.h>
-#endif
-
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
-
-#endif
-
-#endif /* !(_X86_64_CURRENT_H) */
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 7004251fc66b..5103d0b2c46c 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -24,6 +24,7 @@ struct e820map {
 
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000
+#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
 
 #define BIOS_BEGIN		0x000a0000
 #define BIOS_END		0x00100000
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index 4b96148e90c1..f0df7ee96816 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -79,10 +79,6 @@ enum fixed_addresses {
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
 #endif
-#ifdef CONFIG_ACPI
-	FIX_ACPI_BEGIN,
-	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
 #ifdef CONFIG_PCI_MMCONFIG
 	FIX_PCIE_MCFG,
 #endif
@@ -103,6 +99,10 @@ enum fixed_addresses {
 			(__end_of_permanent_fixed_addresses & 511),
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
 	FIX_WP_TEST,
+#ifdef CONFIG_ACPI
+	FIX_ACPI_BEGIN,
+	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+#endif
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
 	FIX_OHCI1394_BASE,
 #endif
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 90958ed993fa..eeca2f51fd8f 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -1,16 +1,20 @@
 #ifndef _ASM_X8664_IOMMU_H
 #define _ASM_X8664_IOMMU_H 1
 
+#include <asm/e820.h>
+
 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int agp_amd64_init(void);
 #ifdef CONFIG_GART_IOMMU
 extern void gart_iommu_init(void);
 extern void gart_iommu_shutdown(void);
 extern void __init gart_parse_options(char *);
 extern void early_gart_iommu_check(void);
 extern void gart_iommu_hole_init(void);
+extern void set_up_gart_resume(u32, u32);
 extern int fallback_aper_order;
 extern int fallback_aper_force;
 extern int gart_iommu_aperture;
@@ -31,4 +35,63 @@ static inline void gart_iommu_shutdown(void)
 
 #endif
 
+/* PTE bits. */
+#define GPTE_VALID	1
+#define GPTE_COHERENT	2
+
+/* Aperture control register bits. */
+#define GARTEN		(1<<0)
+#define DISGARTCPU	(1<<4)
+#define DISGARTIO	(1<<5)
+
+/* GART cache control register bits. */
+#define INVGART		(1<<0)
+#define GARTPTEERR	(1<<1)
+
+/* K8 On-cpu GART registers */
+#define AMD64_GARTAPERTURECTL	0x90
+#define AMD64_GARTAPERTUREBASE	0x94
+#define AMD64_GARTTABLEBASE	0x98
+#define AMD64_GARTCACHECTL	0x9c
+#define AMD64_GARTEN		(1<<0)
+
+static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
+{
+	u32 tmp, ctl;
+
+	/* address of the mappings table */
+	addr >>= 12;
+	tmp = (u32) addr<<4;
+	tmp &= ~0xf;
+	pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
+
+	/* Enable GART translation for this hammer. */
+	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
+	ctl |= GARTEN;
+	ctl &= ~(DISGARTCPU | DISGARTIO);
+	pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+}
+
+static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
+{
+	if (!aper_base)
+		return 0;
+
+	if (aper_base + aper_size > 0x100000000ULL) {
+		printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
+		return 0;
+	}
+	if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
+		printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
+		return 0;
+	}
+	if (aper_size < min_size) {
+		printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n",
+				aper_size>>20, min_size>>20);
+		return 0;
+	}
+
+	return 1;
+}
+
 #endif
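Editorial check of the GARTTABLEBASE encoding in enable_gart_translation() above: the page-aligned table address loses its low 12 bits and lands in bits 31..4 of the config-space dword. Hypothetical address, arithmetic only:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t addr = 0x12345000ULL;	/* page-aligned GART table address */
		uint32_t tmp;

		addr >>= 12;			/* drop the page offset */
		tmp = (uint32_t)addr << 4;	/* place bits 39..12 in dword bits 31..4 */
		tmp &= ~0xf;			/* low nibble is reserved */

		printf("AMD64_GARTTABLEBASE value = %#x\n", (unsigned)tmp); /* 0x123450 */
		return 0;
	}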
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 6e6458853a36..bb06027fc83e 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -112,8 +112,8 @@ extern int geode_get_dev_base(unsigned int dev);
 #define VSA_VR_UNLOCK		0xFC53	/* unlock virtual register */
 #define VSA_VR_SIGNATURE	0x0003
 #define VSA_VR_MEM_SIZE		0x0200
-#define VSA_SIG			0x4132	/* signature is ascii 'VSA2' */
-
+#define AMD_VSA_SIG		0x4132	/* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG		0x534d	/* General Software signature */
 /* GPIO */
 
 #define GPIO_OUTPUT_VAL		0x00
diff --git a/include/asm-x86/hardirq.h b/include/asm-x86/hardirq.h
index 314434d664e7..000787df66e6 100644
--- a/include/asm-x86/hardirq.h
+++ b/include/asm-x86/hardirq.h
@@ -3,3 +3,9 @@
 #else
 # include "hardirq_64.h"
 #endif
+
+extern u64 arch_irq_stat_cpu(unsigned int cpu);
+#define arch_irq_stat_cpu	arch_irq_stat_cpu
+
+extern u64 arch_irq_stat(void);
+#define arch_irq_stat		arch_irq_stat
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 6b722d315936..37672f79dcc8 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -193,6 +193,8 @@ static inline int restore_i387(struct _fpstate __user *buf)
 
 #else /* CONFIG_X86_32 */
 
+extern void finit(void);
+
 static inline void tolerant_fwait(void)
 {
 	asm volatile("fnclex ; fwait");
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index 45d4df3e51e6..2f98df91f1f2 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -55,4 +55,6 @@ static inline void outb_pic(unsigned char value, unsigned int port)
 	udelay(2);
 }
 
+extern struct irq_chip i8259A_chip;
+
 #endif	/* __ASM_I8259_H__ */
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index d593e14f0341..dc0f55f2b034 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -11,6 +11,15 @@
  * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
  */
 
+/* I/O Unit Redirection Table */
+#define IO_APIC_REDIR_VECTOR_MASK	0x000FF
+#define IO_APIC_REDIR_DEST_LOGICAL	0x00800
+#define IO_APIC_REDIR_DEST_PHYSICAL	0x00000
+#define IO_APIC_REDIR_SEND_PENDING	(1 << 12)
+#define IO_APIC_REDIR_REMOTE_IRR	(1 << 14)
+#define IO_APIC_REDIR_LEVEL_TRIGGER	(1 << 15)
+#define IO_APIC_REDIR_MASKED		(1 << 16)
+
 /*
  * The structure of the IO-APIC:
  */
@@ -137,6 +146,9 @@ extern int sis_apic_bug;
 /* 1 if "noapic" boot option passed */
 extern int skip_ioapic_setup;
 
+/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
+extern int timer_through_8259;
+
 static inline void disable_ioapic_setup(void)
 {
 	skip_ioapic_setup = 1;
@@ -162,6 +174,7 @@ extern void ioapic_init_mappings(void);
 
 #else /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
+static const int timer_through_8259 = 0;
 #endif
 
 #endif
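Editorial illustration: the new IO_APIC_REDIR_* constants compose into the low word of a redirection entry. Hypothetical vector value, arithmetic only:

	#include <stdio.h>

	#define IO_APIC_REDIR_VECTOR_MASK	0x000FF
	#define IO_APIC_REDIR_DEST_LOGICAL	0x00800
	#define IO_APIC_REDIR_LEVEL_TRIGGER	(1 << 15)
	#define IO_APIC_REDIR_MASKED		(1 << 16)

	int main(void)
	{
		unsigned int vector = 0x31;
		unsigned int entry = (vector & IO_APIC_REDIR_VECTOR_MASK)
				   | IO_APIC_REDIR_DEST_LOGICAL
				   | IO_APIC_REDIR_LEVEL_TRIGGER
				   | IO_APIC_REDIR_MASKED;

		printf("redir entry = %#x\n", entry);	/* 0x18831 */
		return 0;
	}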
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 1d8cd01fa514..844f2a89afbc 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -18,6 +18,7 @@
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>
 
+#include <asm/pvclock-abi.h>
 #include <asm/desc.h>
 
 #define KVM_MAX_VCPUS 16
@@ -282,7 +283,8 @@ struct kvm_vcpu_arch {
 	struct x86_emulate_ctxt emulate_ctxt;
 
 	gpa_t time;
-	struct kvm_vcpu_time_info hv_clock;
+	struct pvclock_vcpu_time_info hv_clock;
+	unsigned int hv_clock_tsc_khz;
 	unsigned int time_offset;
 	struct page *time_page;
 };
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index 509845942070..76f392146daa 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -48,24 +48,6 @@ struct kvm_mmu_op_release_pt {
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
-/* xen binary-compatible interface. See xen headers for details */
-struct kvm_vcpu_time_info {
-	uint32_t version;
-	uint32_t pad0;
-	uint64_t tsc_timestamp;
-	uint64_t system_time;
-	uint32_t tsc_to_system_mul;
-	int8_t tsc_shift;
-	int8_t pad[3];
-} __attribute__((__packed__)); /* 32 bytes */
-
-struct kvm_wall_clock {
-	uint32_t wc_version;
-	uint32_t wc_sec;
-	uint32_t wc_nsec;
-} __attribute__((__packed__));
-
-
 extern void kvmclock_init(void);
 
 
@@ -89,7 +71,8 @@ static inline long kvm_hypercall0(unsigned int nr)
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr));
+		     : "a"(nr)
+		     : "memory");
 	return ret;
 }
 
@@ -98,7 +81,8 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr), "b"(p1));
+		     : "a"(nr), "b"(p1)
+		     : "memory");
 	return ret;
 }
 
@@ -108,7 +92,8 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr), "b"(p1), "c"(p2));
+		     : "a"(nr), "b"(p1), "c"(p2)
+		     : "memory");
 	return ret;
 }
 
@@ -118,7 +103,8 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3));
+		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+		     : "memory");
 	return ret;
 }
 
@@ -129,7 +115,8 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4));
+		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
+		     : "memory");
 	return ret;
 }
 
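Editorial sketch of why each hypercall gains a "memory" clobber: the hypervisor can read or write guest memory, so the compiler must not cache memory values across the call or sink stores past it. KVM_HYPERCALL only means something inside a guest kernel, so a nop stands in here (hypothetical helper):

	#include <stdio.h>

	static inline long fake_hypercall1(unsigned int nr, unsigned long p1)
	{
		long ret;
		asm volatile("nop"		/* stand-in for the vmcall insn */
			     : "=a" (ret)
			     : "a" (nr), "b" (p1)
			     : "memory");	/* hypervisor may touch our memory */
		return ret;
	}

	int main(void)
	{
		printf("ret = %ld\n", fake_hypercall1(1, 2));
		return 0;
	}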
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 56d0e1fa0258..b63c52182006 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -41,8 +41,10 @@ static inline void __init smpboot_setup_io_apic(void)
 	 */
 	if (!skip_ioapic_setup && nr_ioapics)
 		setup_IO_APIC();
-	else
+	else {
 		nr_ioapics = 0;
+		localise_nmi_watchdog();
+	}
 }
 
 static inline void smpboot_clear_io_apic(void)
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
new file mode 100644
index 000000000000..95beda07c6fa
--- /dev/null
+++ b/include/asm-x86/mmconfig.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_MMCONFIG_H
+#define _ASM_MMCONFIG_H
+
+#ifdef CONFIG_PCI_MMCONFIG
+extern void __cpuinit fam10h_check_enable_mmcfg(void);
+extern void __init check_enable_amd_mmconf_dmi(void);
+#else
+static inline void fam10h_check_enable_mmcfg(void) { }
+static inline void check_enable_amd_mmconf_dmi(void) { }
+#endif
+
+#endif
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 09413ad39d3c..44bce773012e 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -111,7 +111,9 @@
 #define MSR_K8_TOP_MEM2			0xc001001d
 #define MSR_K8_SYSCFG			0xc0010010
 #define MSR_K8_HWCR			0xc0010015
-#define MSR_K8_ENABLE_C1E		0xc0010055
+#define MSR_K8_INT_PENDING_MSG		0xc0010055
+/* C1E active bits in int pending message */
+#define K8_INTP_C1E_ACTIVE_MASK		0x18000000
 #define MSR_K8_TSEG_ADDR		0xc0010112
 #define K8_MTRRFIXRANGE_DRAM_ENABLE	0x00040000 /* MtrrFixDramEn bit */
 #define K8_MTRRFIXRANGE_DRAM_MODIFY	0x00080000 /* MtrrFixDramModEn bit */
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 3707650a169b..2b5f2c91db25 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -18,7 +18,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
 	unsigned long low, high;
 	asm volatile(".byte 0x0f,0x01,0xf9"
 		     : "=a" (low), "=d" (high), "=c" (*aux));
-	return low | ((u64)high >> 32);
+	return low | ((u64)high << 32);
 }
 
 /*
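Editorial check of the native_read_tscp() fix above: rdtscp returns the counter split across edx:eax, and the high half must be shifted left, not right; shifting right by 32 throws the high word away entirely:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t low = 0x89abcdef, high = 0x01234567;

		uint64_t wrong = low | ((uint64_t)high >> 32);	/* high half lost */
		uint64_t right = low | ((uint64_t)high << 32);

		printf("wrong = %#llx\n", (unsigned long long)wrong); /* 0x89abcdef */
		printf("right = %#llx\n", (unsigned long long)right); /* 0x123456789abcdef */
		return 0;
	}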
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 1e363021e72f..05449ef830a7 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -15,27 +15,6 @@
  */
 int do_nmi_callback(struct pt_regs *regs, int cpu);
 
-#ifdef CONFIG_PM
-
-/** Replace the PM callback routine for NMI. */
-struct pm_dev *set_nmi_pm_callback(pm_callback callback);
-
-/** Unset the PM callback routine back to the default. */
-void unset_nmi_pm_callback(struct pm_dev *dev);
-
-#else
-
-static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback)
-{
-	return 0;
-}
-
-static inline void unset_nmi_pm_callback(struct pm_dev *dev)
-{
-}
-
-#endif /* CONFIG_PM */
-
 #ifdef CONFIG_X86_64
 extern void default_do_nmi(struct pt_regs *);
 extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
@@ -46,7 +25,6 @@ extern void nmi_watchdog_default(void);
 
 extern int check_nmi_watchdog(void);
 extern int nmi_watchdog_enabled;
-extern int unknown_nmi_panic;
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
 extern int avail_to_resrv_perfctr_nmi(unsigned int);
 extern int reserve_perfctr_nmi(unsigned int);
@@ -78,6 +56,11 @@ extern int unknown_nmi_panic;
 void __trigger_all_cpu_backtrace(void);
 #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
 
+static inline void localise_nmi_watchdog(void)
+{
+	if (nmi_watchdog == NMI_IO_APIC)
+		nmi_watchdog = NMI_LOCAL_APIC;
+}
 #endif
 
 void lapic_watchdog_stop(void);
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index dc936dddf161..b52ed85f32f5 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -51,8 +51,15 @@
 
 #ifndef __ASSEMBLY__
 
+typedef struct { pgdval_t pgd; } pgd_t;
+typedef struct { pgprotval_t pgprot; } pgprot_t;
+
 extern int page_is_ram(unsigned long pagenr);
 extern int devmem_is_allowed(unsigned long pagenr);
+extern void map_devmem(unsigned long pfn, unsigned long size,
+		       pgprot_t vma_prot);
+extern void unmap_devmem(unsigned long pfn, unsigned long size,
+			 pgprot_t vma_prot);
 
 extern unsigned long max_pfn_mapped;
 
@@ -74,9 +81,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pgprotval_t pgprot; } pgprot_t;
-
 static inline pgd_t native_make_pgd(pgdval_t val)
 {
 	return (pgd_t) { val };
@@ -160,6 +164,7 @@ static inline pteval_t native_pte_val(pte_t pte)
 #endif
 
 #define pte_val(x)	native_pte_val(x)
+#define pte_flags(x)	native_pte_val(x)
 #define __pte(x)	native_make_pte(x)
 
 #endif	/* CONFIG_PARAVIRT */
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index 424e82f8ae27..73ed2e4ebf95 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -13,8 +13,17 @@
  */
 #define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
 
+#ifdef CONFIG_4KSTACKS
+#define THREAD_ORDER	0
+#else
+#define THREAD_ORDER	1
+#endif
+#define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)
+
+
 #ifdef CONFIG_X86_PAE
-#define __PHYSICAL_MASK_SHIFT	36
+/* 44=32+12, the limit we can fit into an unsigned long pfn */
+#define __PHYSICAL_MASK_SHIFT	44
 #define __VIRTUAL_MASK_SHIFT	32
 #define PAGETABLE_LEVELS	3
 
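Editorial arithmetic for the THREAD_SIZE definition just added: order 0 (CONFIG_4KSTACKS) gives a one-page kernel stack, order 1 the default two pages:

	#include <stdio.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		int order;

		/* order 0 = CONFIG_4KSTACKS, order 1 = the default */
		for (order = 0; order <= 1; order++)
			printf("THREAD_ORDER %d -> THREAD_SIZE %d\n",
			       order, PAGE_SIZE << order);
		return 0;	/* prints 4096 and 8192 */
	}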
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 0f13b945e240..e9ada314dfc1 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -238,7 +238,13 @@ struct pv_mmu_ops {
 	void (*pte_update_defer)(struct mm_struct *mm,
 				 unsigned long addr, pte_t *ptep);
 
+	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
+					pte_t *ptep);
+	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
+					pte_t *ptep, pte_t pte);
+
 	pteval_t (*pte_val)(pte_t);
+	pteval_t (*pte_flags)(pte_t);
 	pte_t (*make_pte)(pteval_t pte);
 
 	pgdval_t (*pgd_val)(pgd_t);
@@ -996,6 +1002,20 @@ static inline pteval_t pte_val(pte_t pte)
 	return ret;
 }
 
+static inline pteval_t pte_flags(pte_t pte)
+{
+	pteval_t ret;
+
+	if (sizeof(pteval_t) > sizeof(long))
+		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
+				 pte.pte, (u64)pte.pte >> 32);
+	else
+		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
+				 pte.pte);
+
+	return ret;
+}
+
 static inline pgd_t __pgd(pgdval_t val)
 {
 	pgdval_t ret;
@@ -1024,6 +1044,29 @@ static inline pgdval_t pgd_val(pgd_t pgd)
 	return ret;
 }
 
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+					   pte_t *ptep)
+{
+	pteval_t ret;
+
+	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
+			 mm, addr, ptep);
+
+	return (pte_t) { .pte = ret };
+}
+
+static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+					   pte_t *ptep, pte_t pte)
+{
+	if (sizeof(pteval_t) > sizeof(long))
+		/* 5 arg words */
+		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
+	else
+		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
+			    mm, addr, ptep, pte.pte);
+}
+
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
 	if (sizeof(pteval_t) > sizeof(long))
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
index 88f60cc6a227..7edc47307217 100644
--- a/include/asm-x86/pat.h
+++ b/include/asm-x86/pat.h
@@ -1,14 +1,13 @@
-
 #ifndef _ASM_PAT_H
-#define _ASM_PAT_H 1
+#define _ASM_PAT_H
 
 #include <linux/types.h>
 
 #ifdef CONFIG_X86_PAT
-extern int pat_wc_enabled;
+extern int pat_enabled;
 extern void validate_pat_support(struct cpuinfo_x86 *c);
 #else
-static const int pat_wc_enabled = 0;
+static const int pat_enabled;
 static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
 #endif
 
@@ -21,4 +20,3 @@ extern int free_memtype(u64 start, u64 end);
 extern void pat_disable(char *reason);
 
 #endif
-
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index 30bbde0cb34b..2db14cf17db8 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -18,6 +18,8 @@ struct pci_sysdata {
 #endif
 };
 
+extern int pci_routeirq;
+
 /* scan a bus after allocating a pci_sysdata for it */
 extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
 					    int node);
diff --git a/include/asm-x86/pci_32.h b/include/asm-x86/pci_32.h
index 8c4c3a0368e2..a50d46851285 100644
--- a/include/asm-x86/pci_32.h
+++ b/include/asm-x86/pci_32.h
@@ -18,12 +18,14 @@ struct pci_dev;
 #define PCI_DMA_BUS_IS_PHYS	(1)
 
 /* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME)		(0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME[0];
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		unsigned LEN_NAME[0];
+#define pci_unmap_addr(PTR, ADDR_NAME)		sizeof((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+	do { break; } while (pci_unmap_addr(PTR, ADDR_NAME))
+#define pci_unmap_len(PTR, LEN_NAME)		sizeof((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+	do { break; } while (pci_unmap_len(PTR, LEN_NAME))
 
 
 #endif /* __KERNEL__ */
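Editorial demo of the zero-size-member trick in the new DECLARE_PCI_UNMAP_* macros: the field costs no storage (a GNU zero-length array) but keeps pci_unmap_addr() and friends type-checked via sizeof:

	#include <stdio.h>

	typedef unsigned long dma_addr_t;

	#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME[0];
	#define pci_unmap_addr(PTR, ADDR_NAME)		sizeof((PTR)->ADDR_NAME)

	struct mapping {
		int flags;
		DECLARE_PCI_UNMAP_ADDR(mapaddr)	/* zero-length: no storage */
	};

	int main(void)
	{
		struct mapping m = { 0 };

		/* sizeof a zero-length array is 0, so the accessor is a no-op
		 * (though the member can still affect struct alignment): */
		printf("pci_unmap_addr = %zu\n", pci_unmap_addr(&m, mapaddr));
		printf("sizeof(struct mapping) = %zu\n", sizeof(m));
		return 0;
	}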
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 97c271b2910b..bcb5446a08d1 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -20,30 +20,25 @@
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
 #define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
 
-/*
- * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
- * sign-extended value on 32-bit with all 1's in the upper word,
- * which preserves the upper pte values on 64-bit ptes:
- */
-#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
-#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
-#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
-#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
-#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
-#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
-#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
-#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
-#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
-#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
-#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)
-#define _PAGE_PAT	(_AC(1, L)<<_PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE	(_AC(1, L)<<_PAGE_BIT_PAT_LARGE)
+#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
+#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
+#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
+#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+#define _PAGE_UNUSED2	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
+#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
+#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
-#define _PAGE_NX	0
+#define _PAGE_NX	(_AT(pteval_t, 0))
 #endif
 
 /* If _PAGE_PRESENT is clear, we use these: */
@@ -164,37 +159,37 @@ extern struct list_head pgd_list;
  */
 static inline int pte_dirty(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_DIRTY;
+	return pte_flags(pte) & _PAGE_DIRTY;
 }
 
 static inline int pte_young(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_ACCESSED;
+	return pte_flags(pte) & _PAGE_ACCESSED;
 }
 
 static inline int pte_write(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_RW;
+	return pte_flags(pte) & _PAGE_RW;
 }
 
 static inline int pte_file(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_FILE;
+	return pte_flags(pte) & _PAGE_FILE;
 }
 
 static inline int pte_huge(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_PSE;
+	return pte_flags(pte) & _PAGE_PSE;
 }
 
 static inline int pte_global(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_GLOBAL;
+	return pte_flags(pte) & _PAGE_GLOBAL;
 }
 
 static inline int pte_exec(pte_t pte)
 {
-	return !(pte_val(pte) & _PAGE_NX);
+	return !(pte_flags(pte) & _PAGE_NX);
 }
 
 static inline int pte_special(pte_t pte)
@@ -210,22 +205,22 @@ static inline int pmd_large(pmd_t pte)
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
+	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
+	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
+	return __pte(pte_val(pte) & ~_PAGE_RW);
 }
 
 static inline pte_t pte_mkexec(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
+	return __pte(pte_val(pte) & ~_PAGE_NX);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -250,7 +245,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
 
 static inline pte_t pte_clrhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
+	return __pte(pte_val(pte) & ~_PAGE_PSE);
 }
 
 static inline pte_t pte_mkglobal(pte_t pte)
@@ -260,7 +255,7 @@ static inline pte_t pte_mkglobal(pte_t pte)
 
 static inline pte_t pte_clrglobal(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
+	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
@@ -305,7 +300,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 	return __pgprot(preservebits | addbits);
 }
 
-#define pte_pgprot(x) __pgprot(pte_val(x) & ~PTE_MASK)
+#define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_MASK)
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
 
@@ -369,8 +364,15 @@ enum {
 	PG_LEVEL_4K,
 	PG_LEVEL_2M,
 	PG_LEVEL_1G,
+	PG_LEVEL_NUM
 };
 
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
 /*
  * Helper function that returns the kernel pagetable entry controlling
  * the virtual address 'address'. NULL means no pagetable entry present.
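Editorial demo of the _AC() to _AT() switch above: _AT(pteval_t, 1) is a plain cast in C (and a bare token in assembly), so _PAGE_NX gets its width from pteval_t instead of relying on sign extension of a long. A minimal sketch, assuming the _AT() definition matches the kernel's <linux/const.h>:

	#include <stdio.h>
	#include <stdint.h>

	#ifdef __ASSEMBLY__
	# define _AT(T, X)	X
	#else
	# define _AT(T, X)	((T)(X))
	#endif

	typedef uint64_t pteval_t;	/* pte width with PAE or 64-bit */
	#define _PAGE_BIT_NX	63
	#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)

	int main(void)
	{
		printf("_PAGE_NX = %#llx\n", (unsigned long long)_PAGE_NX);
		return 0;	/* prints 0x8000000000000000 */
	}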
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index 199cab107d85..092b39b3a7e6 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -88,4 +88,10 @@
 #define CX86_ARR_BASE	0xc4
 #define CX86_RCR_BASE	0xdc
 
+#ifdef CONFIG_VM86
+#define X86_VM_MASK	X86_EFLAGS_VM
+#else
+#define X86_VM_MASK	0 /* No VM86 support */
+#endif
+
 #endif /* __ASM_I386_PROCESSOR_FLAGS_H */
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 559105220a47..4ab2ede6f4b9 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -263,15 +263,11 @@ struct tss_struct {
 	struct thread_struct	*io_bitmap_owner;
 
 	/*
-	 * Pad the TSS to be cacheline-aligned (size is 0x100):
-	 */
-	unsigned long		__cacheline_filler[35];
-	/*
 	 * .. and then another 0x100 bytes for the emergency kernel stack:
 	 */
 	unsigned long		stack[64];
 
-} __attribute__((packed));
+} ____cacheline_aligned;
 
 DECLARE_PER_CPU(struct tss_struct, init_tss);
 
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 9f922b0b95d6..8a71db803da6 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -3,7 +3,12 @@
 #include <linux/compiler.h>	/* For __user */
 #include <asm/ptrace-abi.h>
+#include <asm/processor-flags.h>
 
+#ifdef __KERNEL__
+#include <asm/ds.h>		/* the DS BTS struct is used for ptrace too */
+#include <asm/segment.h>
+#endif
 
 #ifndef __ASSEMBLY__
 
@@ -55,9 +60,6 @@ struct pt_regs {
 	unsigned long ss;
 };
 
-#include <asm/vm86.h>
-#include <asm/segment.h>
-
 #endif /* __KERNEL__ */
 
 #else /* __i386__ */
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h
new file mode 100644
index 000000000000..6857f840b243
--- /dev/null
+++ b/include/asm-x86/pvclock-abi.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_X86_PVCLOCK_ABI_H_
+#define _ASM_X86_PVCLOCK_ABI_H_
+#ifndef __ASSEMBLY__
+
+/*
+ * These structs MUST NOT be changed.
+ * They are the ABI between hypervisor and guest OS.
+ * Both Xen and KVM are using this.
+ *
+ * pvclock_vcpu_time_info holds the system time and the tsc timestamp
+ * of the last update. So the guest can use the tsc delta to get a
+ * more precise system time. There is one per virtual cpu.
+ *
+ * pvclock_wall_clock references the point in time when the system
+ * time was zero (usually boot time), thus the guest calculates the
+ * current wall clock by adding the system time.
+ *
+ * Protocol for the "version" fields is: hypervisor raises it (making
+ * it uneven) before it starts updating the fields and raises it again
+ * (making it even) when it is done. Thus the guest can make sure the
+ * time values it got are consistent by checking the version before
+ * and after reading them.
+ */
+
+struct pvclock_vcpu_time_info {
+	u32	version;
+	u32	pad0;
+	u64	tsc_timestamp;
+	u64	system_time;
+	u32	tsc_to_system_mul;
+	s8	tsc_shift;
+	u8	pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+	u32	version;
+	u32	sec;
+	u32	nsec;
+} __attribute__((__packed__));
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PVCLOCK_ABI_H_ */
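Editorial sketch of the guest-side protocol described in the header comment above: retry while the version is odd (update in progress) or changes between the two reads. Hypothetical helper, userspace types:

	#include <stdio.h>
	#include <stdint.h>

	struct pvclock_vcpu_time_info {		/* mirrors the new ABI header */
		uint32_t version;
		uint32_t pad0;
		uint64_t tsc_timestamp;
		uint64_t system_time;
		uint32_t tsc_to_system_mul;
		int8_t   tsc_shift;
		uint8_t  pad[3];
	} __attribute__((__packed__));

	/* Retry until the same even version is seen before and after the reads. */
	static uint64_t pvclock_read(volatile struct pvclock_vcpu_time_info *ti)
	{
		uint32_t version;
		uint64_t time;

		do {
			version = ti->version;
			asm volatile("" ::: "memory");	/* compiler barrier */
			time = ti->system_time;
			asm volatile("" ::: "memory");
		} while ((version & 1) || version != ti->version);

		return time;
	}

	int main(void)
	{
		struct pvclock_vcpu_time_info ti = { .version = 2, .system_time = 12345 };
		printf("system_time = %llu\n", (unsigned long long)pvclock_read(&ti));
		return 0;
	}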
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h
new file mode 100644
index 000000000000..85b1bba8e0a3
--- /dev/null
+++ b/include/asm-x86/pvclock.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_X86_PVCLOCK_H_
+#define _ASM_X86_PVCLOCK_H_
+
+#include <linux/clocksource.h>
+#include <asm/pvclock-abi.h>
+
+/* some helper functions for xen and kvm pv clock sources */
+cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
+			    struct pvclock_vcpu_time_info *vcpu,
+			    struct timespec *ts);
+
+#endif /* _ASM_X86_PVCLOCK_H_ */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index e63741f19392..206f355786dc 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -14,8 +14,8 @@ struct machine_ops {
 
 extern struct machine_ops machine_ops;
 
-void machine_real_restart(unsigned char *code, int length);
 void native_machine_crash_shutdown(struct pt_regs *regs);
 void native_machine_shutdown(void);
+void machine_real_restart(const unsigned char *code, int length);
 
 #endif /* _ASM_REBOOT_H */
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index 7400d3ad75c6..8c387198ca88 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
@@ -19,9 +19,13 @@
 
 #if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
 # define NEED_PAE	(1<<(X86_FEATURE_PAE & 31))
-# define NEED_CX8	(1<<(X86_FEATURE_CX8 & 31))
 #else
 # define NEED_PAE	0
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+# define NEED_CX8	(1<<(X86_FEATURE_CX8 & 31))
+#else
 # define NEED_CX8	0
 #endif
 
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 2557514d7ef6..8d9f0b41ee86 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -6,7 +6,7 @@
 #define TRACE_RESUME(user)					\
 do {								\
 	if (pm_trace_enabled) {					\
-		void *tracedata;				\
+		const void *tracedata;				\
 		asm volatile(_ASM_MOV_UL " $1f,%0\n"		\
 			     ".section .tracedata,\"a\"\n"	\
 			     "1:\t.word %c1\n\t"		\
diff --git a/include/asm-x86/seccomp_32.h b/include/asm-x86/seccomp_32.h
index 18da19e89bff..36e71c5f306f 100644
--- a/include/asm-x86/seccomp_32.h
+++ b/include/asm-x86/seccomp_32.h
@@ -1,4 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef _ASM_SECCOMP_H
2#define _ASM_SECCOMP_H
2 3
3#include <linux/thread_info.h> 4#include <linux/thread_info.h>
4 5
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 553af65a2287..76cfe69aa63c 100644
--- a/include/asm-x86/seccomp_64.h
+++ b/include/asm-x86/seccomp_64.h
@@ -1,4 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef _ASM_SECCOMP_H
2#define _ASM_SECCOMP_H
2 3
3#include <linux/thread_info.h> 4#include <linux/thread_info.h>
4 5
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index b49369ad9a61..193578cd1fd9 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -29,81 +29,116 @@ extern char *strchr(const char *s, int c);
29#define __HAVE_ARCH_STRLEN 29#define __HAVE_ARCH_STRLEN
30extern size_t strlen(const char *s); 30extern size_t strlen(const char *s);
31 31
32static __always_inline void * __memcpy(void * to, const void * from, size_t n) 32static __always_inline void *__memcpy(void *to, const void *from, size_t n)
33{ 33{
34int d0, d1, d2; 34 int d0, d1, d2;
35__asm__ __volatile__( 35 asm volatile("rep ; movsl\n\t"
36 "rep ; movsl\n\t" 36 "movl %4,%%ecx\n\t"
37 "movl %4,%%ecx\n\t" 37 "andl $3,%%ecx\n\t"
38 "andl $3,%%ecx\n\t" 38 "jz 1f\n\t"
39 "jz 1f\n\t" 39 "rep ; movsb\n\t"
40 "rep ; movsb\n\t" 40 "1:"
41 "1:" 41 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
42 : "=&c" (d0), "=&D" (d1), "=&S" (d2) 42 : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
43 : "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from) 43 : "memory");
44 : "memory"); 44 return to;
45return (to);
46} 45}
47 46
48/* 47/*
49 * This looks ugly, but the compiler can optimize it totally, 48 * This looks ugly, but the compiler can optimize it totally,
50 * as the count is constant. 49 * as the count is constant.
51 */ 50 */
52static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n) 51static __always_inline void *__constant_memcpy(void *to, const void *from,
52 size_t n)
53{ 53{
54 long esi, edi; 54 long esi, edi;
55 if (!n) return to; 55 if (!n)
56#if 1 /* want to do small copies with non-string ops? */ 56 return to;
57
57 switch (n) { 58 switch (n) {
58 case 1: *(char*)to = *(char*)from; return to; 59 case 1:
59 case 2: *(short*)to = *(short*)from; return to; 60 *(char *)to = *(char *)from;
60 case 4: *(int*)to = *(int*)from; return to; 61 return to;
61#if 1 /* including those doable with two moves? */ 62 case 2:
62 case 3: *(short*)to = *(short*)from; 63 *(short *)to = *(short *)from;
63 *((char*)to+2) = *((char*)from+2); return to; 64 return to;
64 case 5: *(int*)to = *(int*)from; 65 case 4:
65 *((char*)to+4) = *((char*)from+4); return to; 66 *(int *)to = *(int *)from;
66 case 6: *(int*)to = *(int*)from; 67 return to;
67 *((short*)to+2) = *((short*)from+2); return to; 68
68 case 8: *(int*)to = *(int*)from; 69 case 3:
69 *((int*)to+1) = *((int*)from+1); return to; 70 *(short *)to = *(short *)from;
70#endif 71 *((char *)to + 2) = *((char *)from + 2);
72 return to;
73 case 5:
74 *(int *)to = *(int *)from;
75 *((char *)to + 4) = *((char *)from + 4);
76 return to;
77 case 6:
78 *(int *)to = *(int *)from;
79 *((short *)to + 2) = *((short *)from + 2);
80 return to;
81 case 8:
82 *(int *)to = *(int *)from;
83 *((int *)to + 1) = *((int *)from + 1);
84 return to;
71 } 85 }
72#endif 86
73 esi = (long) from; 87 esi = (long)from;
74 edi = (long) to; 88 edi = (long)to;
75 if (n >= 5*4) { 89 if (n >= 5 * 4) {
76 /* large block: use rep prefix */ 90 /* large block: use rep prefix */
77 int ecx; 91 int ecx;
78 __asm__ __volatile__( 92 asm volatile("rep ; movsl"
79 "rep ; movsl" 93 : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
80 : "=&c" (ecx), "=&D" (edi), "=&S" (esi) 94 : "0" (n / 4), "1" (edi), "2" (esi)
81 : "0" (n/4), "1" (edi),"2" (esi) 95 : "memory"
82 : "memory"
83 ); 96 );
84 } else { 97 } else {
85 /* small block: don't clobber ecx + smaller code */ 98 /* small block: don't clobber ecx + smaller code */
86 if (n >= 4*4) __asm__ __volatile__("movsl" 99 if (n >= 4 * 4)
87 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 100 asm volatile("movsl"
88 if (n >= 3*4) __asm__ __volatile__("movsl" 101 : "=&D"(edi), "=&S"(esi)
89 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 102 : "0"(edi), "1"(esi)
90 if (n >= 2*4) __asm__ __volatile__("movsl" 103 : "memory");
91 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 104 if (n >= 3 * 4)
92 if (n >= 1*4) __asm__ __volatile__("movsl" 105 asm volatile("movsl"
93 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 106 : "=&D"(edi), "=&S"(esi)
107 : "0"(edi), "1"(esi)
108 : "memory");
109 if (n >= 2 * 4)
110 asm volatile("movsl"
111 : "=&D"(edi), "=&S"(esi)
112 : "0"(edi), "1"(esi)
113 : "memory");
114 if (n >= 1 * 4)
115 asm volatile("movsl"
116 : "=&D"(edi), "=&S"(esi)
117 : "0"(edi), "1"(esi)
118 : "memory");
94 } 119 }
95 switch (n % 4) { 120 switch (n % 4) {
96 /* tail */ 121 /* tail */
97 case 0: return to; 122 case 0:
98 case 1: __asm__ __volatile__("movsb" 123 return to;
99 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 124 case 1:
100 return to; 125 asm volatile("movsb"
101 case 2: __asm__ __volatile__("movsw" 126 : "=&D"(edi), "=&S"(esi)
102 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 127 : "0"(edi), "1"(esi)
103 return to; 128 : "memory");
104 default: __asm__ __volatile__("movsw\n\tmovsb" 129 return to;
105 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 130 case 2:
106 return to; 131 asm volatile("movsw"
132 : "=&D"(edi), "=&S"(esi)
133 : "0"(edi), "1"(esi)
134 : "memory");
135 return to;
136 default:
137 asm volatile("movsw\n\tmovsb"
138 : "=&D"(edi), "=&S"(esi)
139 : "0"(edi), "1"(esi)
140 : "memory");
141 return to;
107 } 142 }
108} 143}
109 144
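
Because the size switch above is driven by a compile-time constant, a constant-size call should collapse to a couple of plain moves with no rep prefix at all. An illustrative caller, assuming this header's memcpy() dispatch is in effect:

	struct pair { int a, b; };	/* 8 bytes */

	static void copy_pair(struct pair *d, const struct pair *s)
	{
		/* n == sizeof(*d) == 8 is constant: the 'case 8' path above,
		 * i.e. two 32-bit moves inlined at the call site */
		memcpy(d, s, sizeof(*d));
	}
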
@@ -117,87 +152,86 @@ static __always_inline void * __constant_memcpy(void * to, const void * from, si
117 * This CPU favours 3DNow strongly (eg AMD Athlon) 152 * This CPU favours 3DNow strongly (eg AMD Athlon)
118 */ 153 */
119 154
120static inline void * __constant_memcpy3d(void * to, const void * from, size_t len) 155static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
121{ 156{
122 if (len < 512) 157 if (len < 512)
123 return __constant_memcpy(to, from, len); 158 return __constant_memcpy(to, from, len);
124 return _mmx_memcpy(to, from, len); 159 return _mmx_memcpy(to, from, len);
125} 160}
126 161
127static __inline__ void *__memcpy3d(void *to, const void *from, size_t len) 162static inline void *__memcpy3d(void *to, const void *from, size_t len)
128{ 163{
129 if (len < 512) 164 if (len < 512)
130 return __memcpy(to, from, len); 165 return __memcpy(to, from, len);
131 return _mmx_memcpy(to, from, len); 166 return _mmx_memcpy(to, from, len);
132} 167}
133 168
134#define memcpy(t, f, n) \ 169#define memcpy(t, f, n) \
135(__builtin_constant_p(n) ? \ 170 (__builtin_constant_p((n)) \
136 __constant_memcpy3d((t),(f),(n)) : \ 171 ? __constant_memcpy3d((t), (f), (n)) \
137 __memcpy3d((t),(f),(n))) 172 : __memcpy3d((t), (f), (n)))
138 173
139#else 174#else
140 175
141/* 176/*
142 * No 3D Now! 177 * No 3D Now!
143 */ 178 */
144 179
145#define memcpy(t, f, n) \ 180#define memcpy(t, f, n) \
146(__builtin_constant_p(n) ? \ 181 (__builtin_constant_p((n)) \
147 __constant_memcpy((t),(f),(n)) : \ 182 ? __constant_memcpy((t), (f), (n)) \
148 __memcpy((t),(f),(n))) 183 : __memcpy((t), (f), (n)))
149 184
150#endif 185#endif
151 186
152#define __HAVE_ARCH_MEMMOVE 187#define __HAVE_ARCH_MEMMOVE
153void *memmove(void * dest,const void * src, size_t n); 188void *memmove(void *dest, const void *src, size_t n);
154 189
155#define memcmp __builtin_memcmp 190#define memcmp __builtin_memcmp
156 191
157#define __HAVE_ARCH_MEMCHR 192#define __HAVE_ARCH_MEMCHR
158extern void *memchr(const void * cs,int c,size_t count); 193extern void *memchr(const void *cs, int c, size_t count);
159 194
160static inline void * __memset_generic(void * s, char c,size_t count) 195static inline void *__memset_generic(void *s, char c, size_t count)
161{ 196{
162int d0, d1; 197 int d0, d1;
163__asm__ __volatile__( 198 asm volatile("rep\n\t"
164 "rep\n\t" 199 "stosb"
165 "stosb" 200 : "=&c" (d0), "=&D" (d1)
166 : "=&c" (d0), "=&D" (d1) 201 : "a" (c), "1" (s), "0" (count)
167 :"a" (c),"1" (s),"0" (count) 202 : "memory");
168 :"memory"); 203 return s;
169return s;
170} 204}
171 205
172/* we might want to write optimized versions of these later */ 206/* we might want to write optimized versions of these later */
173#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) 207#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))
174 208
175/* 209/*
176 * memset(x,0,y) is a reasonably common thing to do, so we want to fill 210 * memset(x, 0, y) is a reasonably common thing to do, so we want to fill
177 * things 32 bits at a time even when we don't know the size of the 211 * things 32 bits at a time even when we don't know the size of the
178 * area at compile-time.. 212 * area at compile-time..
179 */ 213 */
180static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count) 214static __always_inline
215void *__constant_c_memset(void *s, unsigned long c, size_t count)
181{ 216{
182int d0, d1; 217 int d0, d1;
183__asm__ __volatile__( 218 asm volatile("rep ; stosl\n\t"
184 "rep ; stosl\n\t" 219 "testb $2,%b3\n\t"
185 "testb $2,%b3\n\t" 220 "je 1f\n\t"
186 "je 1f\n\t" 221 "stosw\n"
187 "stosw\n" 222 "1:\ttestb $1,%b3\n\t"
188 "1:\ttestb $1,%b3\n\t" 223 "je 2f\n\t"
189 "je 2f\n\t" 224 "stosb\n"
190 "stosb\n" 225 "2:"
191 "2:" 226 : "=&c" (d0), "=&D" (d1)
192 :"=&c" (d0), "=&D" (d1) 227 : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
193 :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) 228 : "memory");
194 :"memory"); 229 return s;
195return (s);
196} 230}
197 231
198/* Added by Gertjan van Wingerde to make minix and sysv module work */ 232/* Added by Gertjan van Wingerde to make minix and sysv module work */
199#define __HAVE_ARCH_STRNLEN 233#define __HAVE_ARCH_STRNLEN
200extern size_t strnlen(const char * s, size_t count); 234extern size_t strnlen(const char *s, size_t count);
201/* end of additional stuff */ 235/* end of additional stuff */
202 236
203#define __HAVE_ARCH_STRSTR 237#define __HAVE_ARCH_STRSTR
@@ -207,66 +241,85 @@ extern char *strstr(const char *cs, const char *ct);
207 * This looks horribly ugly, but the compiler can optimize it totally, 241 * This looks horribly ugly, but the compiler can optimize it totally,
208 * as we by now know that both pattern and count is constant.. 242 * as we by now know that both pattern and count is constant..
209 */ 243 */
210static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count) 244static __always_inline
245void *__constant_c_and_count_memset(void *s, unsigned long pattern,
246 size_t count)
211{ 247{
212 switch (count) { 248 switch (count) {
249 case 0:
250 return s;
251 case 1:
252 *(unsigned char *)s = pattern & 0xff;
253 return s;
254 case 2:
255 *(unsigned short *)s = pattern & 0xffff;
256 return s;
257 case 3:
258 *(unsigned short *)s = pattern & 0xffff;
259 *((unsigned char *)s + 2) = pattern & 0xff;
260 return s;
261 case 4:
262 *(unsigned long *)s = pattern;
263 return s;
264 }
265
266#define COMMON(x) \
267 asm volatile("rep ; stosl" \
268 x \
269 : "=&c" (d0), "=&D" (d1) \
270 : "a" (eax), "0" (count/4), "1" ((long)s) \
271 : "memory")
272
273 {
274 int d0, d1;
275#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
276 /* Workaround for broken gcc 4.0 */
277 register unsigned long eax asm("%eax") = pattern;
278#else
279 unsigned long eax = pattern;
280#endif
281
282 switch (count % 4) {
213 case 0: 283 case 0:
284 COMMON("");
214 return s; 285 return s;
215 case 1: 286 case 1:
216 *(unsigned char *)s = pattern & 0xff; 287 COMMON("\n\tstosb");
217 return s; 288 return s;
218 case 2: 289 case 2:
219 *(unsigned short *)s = pattern & 0xffff; 290 COMMON("\n\tstosw");
220 return s; 291 return s;
221 case 3: 292 default:
222 *(unsigned short *)s = pattern & 0xffff; 293 COMMON("\n\tstosw\n\tstosb");
223 *(2+(unsigned char *)s) = pattern & 0xff;
224 return s;
225 case 4:
226 *(unsigned long *)s = pattern;
227 return s; 294 return s;
295 }
228 } 296 }
229#define COMMON(x) \ 297
230__asm__ __volatile__( \
231 "rep ; stosl" \
232 x \
233 : "=&c" (d0), "=&D" (d1) \
234 : "a" (pattern),"0" (count/4),"1" ((long) s) \
235 : "memory")
236{
237 int d0, d1;
238 switch (count % 4) {
239 case 0: COMMON(""); return s;
240 case 1: COMMON("\n\tstosb"); return s;
241 case 2: COMMON("\n\tstosw"); return s;
242 default: COMMON("\n\tstosw\n\tstosb"); return s;
243 }
244}
245
246#undef COMMON 298#undef COMMON
247} 299}
248 300
249#define __constant_c_x_memset(s, c, count) \ 301#define __constant_c_x_memset(s, c, count) \
250(__builtin_constant_p(count) ? \ 302 (__builtin_constant_p(count) \
251 __constant_c_and_count_memset((s),(c),(count)) : \ 303 ? __constant_c_and_count_memset((s), (c), (count)) \
252 __constant_c_memset((s),(c),(count))) 304 : __constant_c_memset((s), (c), (count)))
253 305
254#define __memset(s, c, count) \ 306#define __memset(s, c, count) \
255(__builtin_constant_p(count) ? \ 307 (__builtin_constant_p(count) \
256 __constant_count_memset((s),(c),(count)) : \ 308 ? __constant_count_memset((s), (c), (count)) \
257 __memset_generic((s),(c),(count))) 309 : __memset_generic((s), (c), (count)))
258 310
259#define __HAVE_ARCH_MEMSET 311#define __HAVE_ARCH_MEMSET
260#define memset(s, c, count) \ 312#define memset(s, c, count) \
261(__builtin_constant_p(c) ? \ 313 (__builtin_constant_p(c) \
262 __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ 314 ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
263 __memset((s),(c),(count))) 315 (count)) \
316 : __memset((s), (c), (count)))
264 317
265/* 318/*
266 * find the first occurrence of byte 'c', or 1 past the area if none 319 * find the first occurrence of byte 'c', or 1 past the area if none
267 */ 320 */
268#define __HAVE_ARCH_MEMSCAN 321#define __HAVE_ARCH_MEMSCAN
269extern void *memscan(void * addr, int c, size_t size); 322extern void *memscan(void *addr, int c, size_t size);
270 323
271#endif /* __KERNEL__ */ 324#endif /* __KERNEL__ */
272 325
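
One detail worth calling out in the memset() wrapper above: multiplying the fill byte by 0x01010101UL replicates it into every byte lane of a long, which is what lets the rep;stosl paths store the pattern 32 bits at a time. A quick standalone check of the trick:

	#include <assert.h>

	int main(void)
	{
		unsigned long pattern = 0x01010101UL * (unsigned char)'A';

		/* 'A' == 0x41, so every byte lane holds 0x41 */
		assert((pattern & 0xffffffffUL) == 0x41414141UL);
		return 0;
	}
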
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 24e1c080aa8a..8675c6782a7d 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -3,6 +3,9 @@
3 * Based on code 3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org> 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */ 5 */
6#ifndef __ASM_X86_32_SUSPEND_H
7#define __ASM_X86_32_SUSPEND_H
8
6#include <asm/desc.h> 9#include <asm/desc.h>
7#include <asm/i387.h> 10#include <asm/i387.h>
8 11
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
44/* routines for saving/restoring kernel state */ 47/* routines for saving/restoring kernel state */
45extern int acpi_save_state_mem(void); 48extern int acpi_save_state_mem(void);
46#endif 49#endif
50
51#endif /* __ASM_X86_32_SUSPEND_H */
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index a2f04cd79b29..7e4c133795a1 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -289,7 +289,7 @@ static inline void native_wbinvd(void)
289 289
290#endif/* CONFIG_PARAVIRT */ 290#endif/* CONFIG_PARAVIRT */
291 291
292#define stts() write_cr0(8 | read_cr0()) 292#define stts() write_cr0(read_cr0() | X86_CR0_TS)
293 293
294#endif /* __KERNEL__ */ 294#endif /* __KERNEL__ */
295 295
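
The stts() change is behaviour-preserving: X86_CR0_TS in <asm/processor-flags.h> is CR0 bit 3, the same value 8 the old literal encoded; setting it makes the next FPU instruction trap so FPU state can be restored lazily. A trivial check of the equivalence (constant copied here for illustration):

	#include <assert.h>

	#define X86_CR0_TS 0x00000008	/* CR0.TS: task switched, bit 3 */

	int main(void)
	{
		assert(X86_CR0_TS == 8);	/* the old magic constant */
		return 0;
	}
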
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 77244f17993f..895339d2bc0b 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -1,9 +1,253 @@
1/* thread_info.h: low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
1#ifndef _ASM_X86_THREAD_INFO_H 7#ifndef _ASM_X86_THREAD_INFO_H
8#define _ASM_X86_THREAD_INFO_H
9
10#include <linux/compiler.h>
11#include <asm/page.h>
12#include <asm/types.h>
13
14/*
15 * low level task data that entry.S needs immediate access to
16 * - this struct should fit entirely inside of one cache line
17 * - this struct shares the supervisor stack pages
18 */
19#ifndef __ASSEMBLY__
20struct task_struct;
21struct exec_domain;
22#include <asm/processor.h>
23
24struct thread_info {
25 struct task_struct *task; /* main task structure */
26 struct exec_domain *exec_domain; /* execution domain */
27 unsigned long flags; /* low level flags */
28 __u32 status; /* thread synchronous flags */
29 __u32 cpu; /* current CPU */
30 int preempt_count; /* 0 => preemptable,
31 <0 => BUG */
32 mm_segment_t addr_limit;
33 struct restart_block restart_block;
34 void __user *sysenter_return;
35#ifdef CONFIG_X86_32
36 unsigned long previous_esp; /* ESP of the previous stack in
37 case of nested (IRQ) stacks
38 */
39 __u8 supervisor_stack[0];
40#endif
41};
42
43#define INIT_THREAD_INFO(tsk) \
44{ \
45 .task = &tsk, \
46 .exec_domain = &default_exec_domain, \
47 .flags = 0, \
48 .cpu = 0, \
49 .preempt_count = 1, \
50 .addr_limit = KERNEL_DS, \
51 .restart_block = { \
52 .fn = do_no_restart_syscall, \
53 }, \
54}
55
56#define init_thread_info (init_thread_union.thread_info)
57#define init_stack (init_thread_union.stack)
58
59#else /* !__ASSEMBLY__ */
60
61#include <asm/asm-offsets.h>
62
63#endif
64
65/*
66 * thread information flags
67 * - these are process state flags that various assembly files
68 * may need to access
69 * - pending work-to-be-done flags are in LSW
70 * - other flags in MSW
71 * Warning: layout of LSW is hardcoded in entry.S
72 */
73#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
74#define TIF_SIGPENDING 2 /* signal pending */
75#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
76#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
77#define TIF_IRET 5 /* force IRET */
2#ifdef CONFIG_X86_32 78#ifdef CONFIG_X86_32
3# include "thread_info_32.h" 79#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
80#endif
81#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
82#define TIF_SECCOMP 8 /* secure computing */
83#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
84#define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */
85#define TIF_NOTSC 16 /* TSC is not accessible in userland */
86#define TIF_IA32 17 /* 32bit process */
87#define TIF_FORK 18 /* ret_from_fork */
88#define TIF_ABI_PENDING 19
89#define TIF_MEMDIE 20
90#define TIF_DEBUG 21 /* uses debug registers */
91#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
92#define TIF_FREEZE 23 /* is freezing for suspend */
93#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
94#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
95#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
96#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
97
98#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
99#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
100#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
101#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
102#define _TIF_IRET (1 << TIF_IRET)
103#ifdef CONFIG_X86_32
104#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
4#else 105#else
5# include "thread_info_64.h" 106#define _TIF_SYSCALL_EMU 0
6#endif 107#endif
108#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
109#define _TIF_SECCOMP (1 << TIF_SECCOMP)
110#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
111#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
112#define _TIF_NOTSC (1 << TIF_NOTSC)
113#define _TIF_IA32 (1 << TIF_IA32)
114#define _TIF_FORK (1 << TIF_FORK)
115#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
116#define _TIF_DEBUG (1 << TIF_DEBUG)
117#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
118#define _TIF_FREEZE (1 << TIF_FREEZE)
119#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
120#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
121#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
122#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)
123
124/* work to do on interrupt/exception return */
125#define _TIF_WORK_MASK \
126 (0x0000FFFF & \
127 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP| \
128 _TIF_SECCOMP|_TIF_SYSCALL_EMU))
129
130/* work to do on any return to user space */
131#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
132
133/* Only used for 64 bit */
134#define _TIF_DO_NOTIFY_MASK \
135 (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED)
136
137/* flags to check in __switch_to() */
138#define _TIF_WORK_CTXSW \
139 (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS| \
140 _TIF_NOTSC)
141
142#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
143#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
144
145#define PREEMPT_ACTIVE 0x10000000
146
147/* thread information allocation */
148#ifdef CONFIG_DEBUG_STACK_USAGE
149#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
150#else
151#define THREAD_FLAGS GFP_KERNEL
152#endif
153
154#define alloc_thread_info(tsk) \
155 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
156
157#ifdef CONFIG_X86_32
158
159#define STACK_WARN (THREAD_SIZE/8)
160/*
161 * macros/functions for gaining access to the thread information structure
162 *
163 * preempt_count needs to be 1 initially, until the scheduler is functional.
164 */
165#ifndef __ASSEMBLY__
166
167
168/* how to get the current stack pointer from C */
169register unsigned long current_stack_pointer asm("esp") __used;
170
171/* how to get the thread information struct from C */
172static inline struct thread_info *current_thread_info(void)
173{
174 return (struct thread_info *)
175 (current_stack_pointer & ~(THREAD_SIZE - 1));
176}
177
178#else /* !__ASSEMBLY__ */
179
180/* how to get the thread information struct from ASM */
181#define GET_THREAD_INFO(reg) \
182 movl $-THREAD_SIZE, reg; \
183 andl %esp, reg
184
185/* use this one if reg already contains %esp */
186#define GET_THREAD_INFO_WITH_ESP(reg) \
187 andl $-THREAD_SIZE, reg
188
189#endif
190
191#else /* X86_32 */
192
193#include <asm/pda.h>
194
195/*
196 * macros/functions for gaining access to the thread information structure
197 * preempt_count needs to be 1 initially, until the scheduler is functional.
198 */
199#ifndef __ASSEMBLY__
200static inline struct thread_info *current_thread_info(void)
201{
202 struct thread_info *ti;
203 ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
204 return ti;
205}
206
207/* do not use in interrupt context */
208static inline struct thread_info *stack_thread_info(void)
209{
210 struct thread_info *ti;
211 asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
212 return ti;
213}
214
215#else /* !__ASSEMBLY__ */
216
217/* how to get the thread information struct from ASM */
218#define GET_THREAD_INFO(reg) \
219 movq %gs:pda_kernelstack,reg ; \
220 subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
221
222#endif
223
224#endif /* !X86_32 */
225
226/*
227 * Thread-synchronous status.
228 *
229 * This is different from the flags in that nobody else
230 * ever touches our thread-synchronous status, so we don't
231 * have to worry about atomic accesses.
232 */
233#define TS_USEDFPU 0x0001 /* FPU was used by this task
234 this quantum (SMP) */
235#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
236#define TS_POLLING 0x0004 /* true if in idle loop
237 and not sleeping */
238#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
239
240#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
241
242#ifndef __ASSEMBLY__
243#define HAVE_SET_RESTORE_SIGMASK 1
244static inline void set_restore_sigmask(void)
245{
246 struct thread_info *ti = current_thread_info();
247 ti->status |= TS_RESTORE_SIGMASK;
248 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
249}
250#endif /* !__ASSEMBLY__ */
7 251
8#ifndef __ASSEMBLY__ 252#ifndef __ASSEMBLY__
9extern void arch_task_cache_init(void); 253extern void arch_task_cache_init(void);
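
The 32-bit current_thread_info() above works because thread_info sits at the bottom of a THREAD_SIZE-aligned stack, so masking low bits off %esp finds it; the 64-bit side goes through the PDA instead, since interrupts there run on separate stacks and masking %rsp is not reliable. A standalone sketch of the masking arithmetic (THREAD_SIZE and the address are assumptions for illustration):

	#include <assert.h>
	#include <stdint.h>

	#define THREAD_SIZE 8192	/* assumes 8 KiB stacks (!CONFIG_4KSTACKS) */

	int main(void)
	{
		uintptr_t esp  = 0xc0123abc;	/* some kernel stack pointer */
		uintptr_t base = esp & ~(uintptr_t)(THREAD_SIZE - 1);

		/* thread_info sits at the base of the aligned stack region */
		assert(base == 0xc0122000);
		return 0;
	}
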
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
deleted file mode 100644
index b6338829d1a8..000000000000
--- a/include/asm-x86/thread_info_32.h
+++ /dev/null
@@ -1,205 +0,0 @@
1/* thread_info.h: i386 low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
7#ifndef _ASM_THREAD_INFO_H
8#define _ASM_THREAD_INFO_H
9
10#ifdef __KERNEL__
11
12#include <linux/compiler.h>
13#include <asm/page.h>
14
15#ifndef __ASSEMBLY__
16#include <asm/processor.h>
17#endif
18
19/*
20 * low level task data that entry.S needs immediate access to
21 * - this struct should fit entirely inside of one cache line
22 * - this struct shares the supervisor stack pages
23 * - if the contents of this structure are changed,
24 * the assembly constants must also be changed
25 */
26#ifndef __ASSEMBLY__
27
28struct thread_info {
29 struct task_struct *task; /* main task structure */
30 struct exec_domain *exec_domain; /* execution domain */
31 unsigned long flags; /* low level flags */
32 unsigned long status; /* thread-synchronous flags */
33 __u32 cpu; /* current CPU */
34 int preempt_count; /* 0 => preemptable,
35 <0 => BUG */
36 mm_segment_t addr_limit; /* thread address space:
37 0-0xBFFFFFFF user-thread
38 0-0xFFFFFFFF kernel-thread
39 */
40 void *sysenter_return;
41 struct restart_block restart_block;
42 unsigned long previous_esp; /* ESP of the previous stack in
43 case of nested (IRQ) stacks
44 */
45 __u8 supervisor_stack[0];
46};
47
48#else /* !__ASSEMBLY__ */
49
50#include <asm/asm-offsets.h>
51
52#endif
53
54#define PREEMPT_ACTIVE 0x10000000
55#ifdef CONFIG_4KSTACKS
56#define THREAD_SIZE (4096)
57#else
58#define THREAD_SIZE (8192)
59#endif
60
61#define STACK_WARN (THREAD_SIZE/8)
62/*
63 * macros/functions for gaining access to the thread information structure
64 *
65 * preempt_count needs to be 1 initially, until the scheduler is functional.
66 */
67#ifndef __ASSEMBLY__
68
69#define INIT_THREAD_INFO(tsk) \
70{ \
71 .task = &tsk, \
72 .exec_domain = &default_exec_domain, \
73 .flags = 0, \
74 .cpu = 0, \
75 .preempt_count = 1, \
76 .addr_limit = KERNEL_DS, \
77 .restart_block = { \
78 .fn = do_no_restart_syscall, \
79 }, \
80}
81
82#define init_thread_info (init_thread_union.thread_info)
83#define init_stack (init_thread_union.stack)
84
85
86/* how to get the current stack pointer from C */
87register unsigned long current_stack_pointer asm("esp") __used;
88
89/* how to get the thread information struct from C */
90static inline struct thread_info *current_thread_info(void)
91{
92 return (struct thread_info *)
93 (current_stack_pointer & ~(THREAD_SIZE - 1));
94}
95
96/* thread information allocation */
97#ifdef CONFIG_DEBUG_STACK_USAGE
98#define alloc_thread_info(tsk) ((struct thread_info *) \
99 __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE)))
100#else
101#define alloc_thread_info(tsk) ((struct thread_info *) \
102 __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
103#endif
104
105#else /* !__ASSEMBLY__ */
106
107/* how to get the thread information struct from ASM */
108#define GET_THREAD_INFO(reg) \
109 movl $-THREAD_SIZE, reg; \
110 andl %esp, reg
111
112/* use this one if reg already contains %esp */
113#define GET_THREAD_INFO_WITH_ESP(reg) \
114 andl $-THREAD_SIZE, reg
115
116#endif
117
118/*
119 * thread information flags
120 * - these are process state flags that various
121 * assembly files may need to access
122 * - pending work-to-be-done flags are in LSW
123 * - other flags in MSW
124 */
125#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
126#define TIF_SIGPENDING 1 /* signal pending */
127#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
128#define TIF_SINGLESTEP 3 /* restore singlestep on return to
129 user mode */
130#define TIF_IRET 4 /* return with iret */
131#define TIF_SYSCALL_EMU 5 /* syscall emulation active */
132#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
133#define TIF_SECCOMP 7 /* secure computing */
134#define TIF_HRTICK_RESCHED 9 /* reprogram hrtick timer */
135#define TIF_MEMDIE 16
136#define TIF_DEBUG 17 /* uses debug registers */
137#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
138#define TIF_FREEZE 19 /* is freezing for suspend */
139#define TIF_NOTSC 20 /* TSC is not accessible in userland */
140#define TIF_FORCED_TF 21 /* true if TF in eflags artificially */
141#define TIF_DEBUGCTLMSR 22 /* uses thread_struct.debugctlmsr */
142#define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */
143#define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */
144
145#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
146#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
147#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
148#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
149#define _TIF_IRET (1 << TIF_IRET)
150#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
151#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
152#define _TIF_SECCOMP (1 << TIF_SECCOMP)
153#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
154#define _TIF_DEBUG (1 << TIF_DEBUG)
155#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
156#define _TIF_FREEZE (1 << TIF_FREEZE)
157#define _TIF_NOTSC (1 << TIF_NOTSC)
158#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
159#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
160#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
161#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)
162
163/* work to do on interrupt/exception return */
164#define _TIF_WORK_MASK \
165 (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
166 _TIF_SECCOMP | _TIF_SYSCALL_EMU))
167/* work to do on any return to u-space */
168#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
169
170/* flags to check in __switch_to() */
171#define _TIF_WORK_CTXSW \
172 (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
173 _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
174#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
175#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG)
176
177
178/*
179 * Thread-synchronous status.
180 *
181 * This is different from the flags in that nobody else
182 * ever touches our thread-synchronous status, so we don't
183 * have to worry about atomic accesses.
184 */
185#define TS_USEDFPU 0x0001 /* FPU was used by this task
186 this quantum (SMP) */
187#define TS_POLLING 0x0002 /* True if in idle loop
188 and not sleeping */
189#define TS_RESTORE_SIGMASK 0x0004 /* restore signal mask in do_signal() */
190
191#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
192
193#ifndef __ASSEMBLY__
194#define HAVE_SET_RESTORE_SIGMASK 1
195static inline void set_restore_sigmask(void)
196{
197 struct thread_info *ti = current_thread_info();
198 ti->status |= TS_RESTORE_SIGMASK;
199 set_bit(TIF_SIGPENDING, &ti->flags);
200}
201#endif /* !__ASSEMBLY__ */
202
203#endif /* __KERNEL__ */
204
205#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
deleted file mode 100644
index cb69f70abba1..000000000000
--- a/include/asm-x86/thread_info_64.h
+++ /dev/null
@@ -1,195 +0,0 @@
1/* thread_info.h: x86_64 low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
7#ifndef _ASM_THREAD_INFO_H
8#define _ASM_THREAD_INFO_H
9
10#ifdef __KERNEL__
11
12#include <asm/page.h>
13#include <asm/types.h>
14#include <asm/pda.h>
15
16/*
17 * low level task data that entry.S needs immediate access to
18 * - this struct should fit entirely inside of one cache line
19 * - this struct shares the supervisor stack pages
20 */
21#ifndef __ASSEMBLY__
22struct task_struct;
23struct exec_domain;
24#include <asm/processor.h>
25
26struct thread_info {
27 struct task_struct *task; /* main task structure */
28 struct exec_domain *exec_domain; /* execution domain */
29 __u32 flags; /* low level flags */
30 __u32 status; /* thread synchronous flags */
31 __u32 cpu; /* current CPU */
32 int preempt_count; /* 0 => preemptable,
33 <0 => BUG */
34 mm_segment_t addr_limit;
35 struct restart_block restart_block;
36#ifdef CONFIG_IA32_EMULATION
37 void __user *sysenter_return;
38#endif
39};
40#endif
41
42/*
43 * macros/functions for gaining access to the thread information structure
44 * preempt_count needs to be 1 initially, until the scheduler is functional.
45 */
46#ifndef __ASSEMBLY__
47#define INIT_THREAD_INFO(tsk) \
48{ \
49 .task = &tsk, \
50 .exec_domain = &default_exec_domain, \
51 .flags = 0, \
52 .cpu = 0, \
53 .preempt_count = 1, \
54 .addr_limit = KERNEL_DS, \
55 .restart_block = { \
56 .fn = do_no_restart_syscall, \
57 }, \
58}
59
60#define init_thread_info (init_thread_union.thread_info)
61#define init_stack (init_thread_union.stack)
62
63static inline struct thread_info *current_thread_info(void)
64{
65 struct thread_info *ti;
66 ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
67 return ti;
68}
69
70/* do not use in interrupt context */
71static inline struct thread_info *stack_thread_info(void)
72{
73 struct thread_info *ti;
74 asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
75 return ti;
76}
77
78/* thread information allocation */
79#ifdef CONFIG_DEBUG_STACK_USAGE
80#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
81#else
82#define THREAD_FLAGS GFP_KERNEL
83#endif
84
85#define alloc_thread_info(tsk) \
86 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
87
88#else /* !__ASSEMBLY__ */
89
90/* how to get the thread information struct from ASM */
91#define GET_THREAD_INFO(reg) \
92 movq %gs:pda_kernelstack,reg ; \
93 subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
94
95#endif
96
97/*
98 * thread information flags
99 * - these are process state flags that various assembly files
100 * may need to access
101 * - pending work-to-be-done flags are in LSW
102 * - other flags in MSW
103 * Warning: layout of LSW is hardcoded in entry.S
104 */
105#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
106#define TIF_SIGPENDING 2 /* signal pending */
107#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
108#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
109#define TIF_IRET 5 /* force IRET */
110#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
111#define TIF_SECCOMP 8 /* secure computing */
112#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
113#define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */
114/* 16 free */
115#define TIF_IA32 17 /* 32bit process */
116#define TIF_FORK 18 /* ret_from_fork */
117#define TIF_ABI_PENDING 19
118#define TIF_MEMDIE 20
119#define TIF_DEBUG 21 /* uses debug registers */
120#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
121#define TIF_FREEZE 23 /* is freezing for suspend */
122#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
123#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
124#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
125#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
126#define TIF_NOTSC 28 /* TSC is not accessible in userland */
127
128#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
129#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
130#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
131#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
132#define _TIF_IRET (1 << TIF_IRET)
133#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
134#define _TIF_SECCOMP (1 << TIF_SECCOMP)
135#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
136#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
137#define _TIF_IA32 (1 << TIF_IA32)
138#define _TIF_FORK (1 << TIF_FORK)
139#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
140#define _TIF_DEBUG (1 << TIF_DEBUG)
141#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
142#define _TIF_FREEZE (1 << TIF_FREEZE)
143#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
144#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
145#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
146#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)
147#define _TIF_NOTSC (1 << TIF_NOTSC)
148
149/* work to do on interrupt/exception return */
150#define _TIF_WORK_MASK \
151 (0x0000FFFF & \
152 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
153/* work to do on any return to user space */
154#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
155
156#define _TIF_DO_NOTIFY_MASK \
157 (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED)
158
159/* flags to check in __switch_to() */
160#define _TIF_WORK_CTXSW \
161 (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS|_TIF_NOTSC)
162#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
163#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
164
165#define PREEMPT_ACTIVE 0x10000000
166
167/*
168 * Thread-synchronous status.
169 *
170 * This is different from the flags in that nobody else
171 * ever touches our thread-synchronous status, so we don't
172 * have to worry about atomic accesses.
173 */
174#define TS_USEDFPU 0x0001 /* FPU was used by this task
175 this quantum (SMP) */
176#define TS_COMPAT 0x0002 /* 32bit syscall active */
177#define TS_POLLING 0x0004 /* true if in idle loop
178 and not sleeping */
179#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
180
181#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
182
183#ifndef __ASSEMBLY__
184#define HAVE_SET_RESTORE_SIGMASK 1
185static inline void set_restore_sigmask(void)
186{
187 struct thread_info *ti = current_thread_info();
188 ti->status |= TS_RESTORE_SIGMASK;
189 set_bit(TIF_SIGPENDING, &ti->flags);
190}
191#endif /* !__ASSEMBLY__ */
192
193#endif /* __KERNEL__ */
194
195#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index 0c0674d94255..35c76ceb9f40 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -22,12 +22,23 @@ static inline void __native_flush_tlb(void)
22 22
23static inline void __native_flush_tlb_global(void) 23static inline void __native_flush_tlb_global(void)
24{ 24{
25 unsigned long cr4 = read_cr4(); 25 unsigned long flags;
26 unsigned long cr4;
26 27
28 /*
29 * Read-modify-write to CR4 - protect it from preemption and
30 * from interrupts. (Use the raw variant because this code can
31 * be called from deep inside debugging code.)
32 */
33 raw_local_irq_save(flags);
34
35 cr4 = read_cr4();
27 /* clear PGE */ 36 /* clear PGE */
28 write_cr4(cr4 & ~X86_CR4_PGE); 37 write_cr4(cr4 & ~X86_CR4_PGE);
29 /* write old PGE again and flush TLBs */ 38 /* write old PGE again and flush TLBs */
30 write_cr4(cr4); 39 write_cr4(cr4);
40
41 raw_local_irq_restore(flags);
31} 42}
32 43
33static inline void __native_flush_tlb_single(unsigned long addr) 44static inline void __native_flush_tlb_single(unsigned long addr)
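
For contrast with the global flush above: __native_flush_tlb_single() only has to drop one translation, which in kernels of this vintage is a single invlpg rather than a CR4 round-trip. Roughly (a sketch, not the patch text):

	/* flush the TLB entry for one linear address */
	static inline void flush_one_page(unsigned long addr)
	{
		asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
	}
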
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index fe26e36d0f51..9c1a4a3470d9 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -290,7 +290,7 @@ __SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
290#define __NR_rt_sigqueueinfo 129 290#define __NR_rt_sigqueueinfo 129
291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) 291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
292#define __NR_rt_sigsuspend 130 292#define __NR_rt_sigsuspend 130
293__SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend) 293__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend)
294#define __NR_sigaltstack 131 294#define __NR_sigaltstack 131
295__SYSCALL(__NR_sigaltstack, stub_sigaltstack) 295__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
296#define __NR_utime 132 296#define __NR_utime 132
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
index 074b357146df..5ce351325e01 100644
--- a/include/asm-x86/vm86.h
+++ b/include/asm-x86/vm86.h
@@ -14,12 +14,6 @@
14 14
15#include <asm/processor-flags.h> 15#include <asm/processor-flags.h>
16 16
17#ifdef CONFIG_VM86
18#define X86_VM_MASK X86_EFLAGS_VM
19#else
20#define X86_VM_MASK 0 /* No VM86 support */
21#endif
22
23#define BIOSSEG 0x0f000 17#define BIOSSEG 0x0f000
24 18
25#define CPU_086 0 19#define CPU_086 0
@@ -121,7 +115,6 @@ struct vm86plus_info_struct {
121 unsigned long is_vm86pus:1; /* for vm86 internal use */ 115 unsigned long is_vm86pus:1; /* for vm86 internal use */
122 unsigned char vm86dbg_intxxtab[32]; /* for debugger */ 116 unsigned char vm86dbg_intxxtab[32]; /* for debugger */
123}; 117};
124
125struct vm86plus_struct { 118struct vm86plus_struct {
126 struct vm86_regs regs; 119 struct vm86_regs regs;
127 unsigned long flags; 120 unsigned long flags;
@@ -133,6 +126,9 @@ struct vm86plus_struct {
133}; 126};
134 127
135#ifdef __KERNEL__ 128#ifdef __KERNEL__
129
130#include <asm/ptrace.h>
131
136/* 132/*
137 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86 133 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
138 * mode - the main change is that the old segment descriptors aren't 134 * mode - the main change is that the old segment descriptors aren't
@@ -141,7 +137,6 @@ struct vm86plus_struct {
141 * at the end of the structure. Look at ptrace.h to see the "normal" 137 * at the end of the structure. Look at ptrace.h to see the "normal"
142 * setup. For user space layout see 'struct vm86_regs' above. 138 * setup. For user space layout see 'struct vm86_regs' above.
143 */ 139 */
144#include <asm/ptrace.h>
145 140
146struct kernel_vm86_regs { 141struct kernel_vm86_regs {
147/* 142/*
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index c2ccd997ed35..2a4f9b41d684 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -176,9 +176,9 @@ HYPERVISOR_fpu_taskswitch(int set)
176} 176}
177 177
178static inline int 178static inline int
179HYPERVISOR_sched_op(int cmd, unsigned long arg) 179HYPERVISOR_sched_op(int cmd, void *arg)
180{ 180{
181 return _hypercall2(int, sched_op, cmd, arg); 181 return _hypercall2(int, sched_op_new, cmd, arg);
182} 182}
183 183
184static inline long 184static inline long
@@ -315,6 +315,13 @@ HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
315} 315}
316 316
317static inline void 317static inline void
318MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
319{
320 mcl->op = __HYPERVISOR_fpu_taskswitch;
321 mcl->args[0] = set;
322}
323
324static inline void
318MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, 325MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
319 pte_t new_val, unsigned long flags) 326 pte_t new_val, unsigned long flags)
320{ 327{
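
The sched_op signature change is visible to callers: the argument is now an opaque pointer (matching the hypervisor's newer sched_op interface), so commands that take no argument pass NULL. A hedged usage sketch, assuming the standard SCHED_yield command from the Xen interface headers:

	/* give the vcpu back to the hypervisor; SCHED_yield takes no argument */
	static inline void xen_yield(void)
	{
		HYPERVISOR_sched_op(SCHED_yield, NULL);
	}
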
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index baf3a4dce28c..377c04591c15 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -26,15 +26,20 @@ typedef struct xpaddr {
26#define FOREIGN_FRAME_BIT (1UL<<31) 26#define FOREIGN_FRAME_BIT (1UL<<31)
27#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) 27#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
28 28
29extern unsigned long *phys_to_machine_mapping; 29/* Maximum amount of memory we can handle in a domain in pages */
30#define MAX_DOMAIN_PAGES \
31 ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
32
33
34extern unsigned long get_phys_to_machine(unsigned long pfn);
35extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
30 36
31static inline unsigned long pfn_to_mfn(unsigned long pfn) 37static inline unsigned long pfn_to_mfn(unsigned long pfn)
32{ 38{
33 if (xen_feature(XENFEAT_auto_translated_physmap)) 39 if (xen_feature(XENFEAT_auto_translated_physmap))
34 return pfn; 40 return pfn;
35 41
36 return phys_to_machine_mapping[(unsigned int)(pfn)] & 42 return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
37 ~FOREIGN_FRAME_BIT;
38} 43}
39 44
40static inline int phys_to_machine_mapping_valid(unsigned long pfn) 45static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -42,7 +47,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
42 if (xen_feature(XENFEAT_auto_translated_physmap)) 47 if (xen_feature(XENFEAT_auto_translated_physmap))
43 return 1; 48 return 1;
44 49
45 return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); 50 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
46} 51}
47 52
48static inline unsigned long mfn_to_pfn(unsigned long mfn) 53static inline unsigned long mfn_to_pfn(unsigned long mfn)
@@ -106,20 +111,12 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
106 unsigned long pfn = mfn_to_pfn(mfn); 111 unsigned long pfn = mfn_to_pfn(mfn);
107 if ((pfn < max_mapnr) 112 if ((pfn < max_mapnr)
108 && !xen_feature(XENFEAT_auto_translated_physmap) 113 && !xen_feature(XENFEAT_auto_translated_physmap)
109 && (phys_to_machine_mapping[pfn] != mfn)) 114 && (get_phys_to_machine(pfn) != mfn))
110 return max_mapnr; /* force !pfn_valid() */ 115 return max_mapnr; /* force !pfn_valid() */
116 /* XXX fixme; not true with sparsemem */
111 return pfn; 117 return pfn;
112} 118}
113 119
114static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
115{
116 if (xen_feature(XENFEAT_auto_translated_physmap)) {
117 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
118 return;
119 }
120 phys_to_machine_mapping[pfn] = mfn;
121}
122
123/* VIRT <-> MACHINE conversion */ 120/* VIRT <-> MACHINE conversion */
124#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) 121#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
125#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v)))) 122#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
@@ -150,13 +147,9 @@ static inline pte_t __pte_ma(pteval_t x)
150 return (pte_t) { .pte = x }; 147 return (pte_t) { .pte = x };
151} 148}
152 149
153#ifdef CONFIG_X86_PAE
154#define pmd_val_ma(v) ((v).pmd) 150#define pmd_val_ma(v) ((v).pmd)
155#define pud_val_ma(v) ((v).pgd.pgd) 151#define pud_val_ma(v) ((v).pgd.pgd)
156#define __pmd_ma(x) ((pmd_t) { (x) } ) 152#define __pmd_ma(x) ((pmd_t) { (x) } )
157#else /* !X86_PAE */
158#define pmd_val_ma(v) ((v).pud.pgd.pgd)
159#endif /* CONFIG_X86_PAE */
160 153
161#define pgd_val_ma(x) ((x).pgd) 154#define pgd_val_ma(x) ((x).pgd)
162 155
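
MAX_DOMAIN_PAGES above is just the configured memory ceiling converted from GiB to pages, sized so the new get_phys_to_machine()/set_phys_to_machine() accessors can bound the p2m table. A worked example, assuming CONFIG_XEN_MAX_DOMAIN_MEMORY=8 and 4 KiB pages:

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096ULL
	#define CONFIG_XEN_MAX_DOMAIN_MEMORY 8	/* assumed: 8 GiB ceiling */

	int main(void)
	{
		uint64_t pages = (uint64_t)CONFIG_XEN_MAX_DOMAIN_MEMORY
				 * 1024 * 1024 * 1024 / PAGE_SIZE;

		assert(pages == 2097152);	/* 8 GiB / 4 KiB = 2^21 pages */
		return 0;
	}
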
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index 067b5c1835a3..921b45840449 100644
--- a/include/asm-x86/xor_32.h
+++ b/include/asm-x86/xor_32.h
@@ -1,3 +1,6 @@
1#ifndef ASM_X86__XOR_32_H
2#define ASM_X86__XOR_32_H
3
1/* 4/*
2 * Optimized RAID-5 checksumming functions for MMX and SSE. 5 * Optimized RAID-5 checksumming functions for MMX and SSE.
3 * 6 *
@@ -881,3 +884,5 @@ do { \
881 deals with a load to a line that is being prefetched. */ 884 deals with a load to a line that is being prefetched. */
882#define XOR_SELECT_TEMPLATE(FASTEST) \ 885#define XOR_SELECT_TEMPLATE(FASTEST) \
883 (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) 886 (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
887
888#endif /* ASM_X86__XOR_32_H */
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 24957e39ac8a..2d3a18de295b 100644
--- a/include/asm-x86/xor_64.h
+++ b/include/asm-x86/xor_64.h
@@ -1,3 +1,6 @@
1#ifndef ASM_X86__XOR_64_H
2#define ASM_X86__XOR_64_H
3
1/* 4/*
2 * Optimized RAID-5 checksumming functions for MMX and SSE. 5 * Optimized RAID-5 checksumming functions for MMX and SSE.
3 * 6 *
@@ -354,3 +357,5 @@ do { \
354 We may also be able to load into the L1 only depending on how the cpu 357 We may also be able to load into the L1 only depending on how the cpu
355 deals with a load to a line that is being prefetched. */ 358 deals with a load to a line that is being prefetched. */
356#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse) 359#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
360
361#endif /* ASM_X86__XOR_64_H */