author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 18:58:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 18:58:21 -0400
commit	0195c00244dc2e9f522475868fa278c473ba7339
tree	f97ca98ae64ede2c33ad3de05ed7bbfa4f4495ed /arch/x86
parent	f21ce8f8447c8be8847dadcfdbcc76b0d7365fa5
parent	141124c02059eee9dbc5c86ea797b1ca888e77f7
Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system
Pull "Disintegrate and delete asm/system.h" from David Howells: "Here are a bunch of patches to disintegrate asm/system.h into a set of separate bits to relieve the problem of circular inclusion dependencies. I've built all the working defconfigs from all the arches that I can and made sure that they don't break. The reason for these patches is that I recently encountered a circular dependency problem that came about when I produced some patches to optimise get_order() by rewriting it to use ilog2(). This uses bitops - and on the SH arch asm/bitops.h drags in asm-generic/get_order.h by a circuituous route involving asm/system.h. The main difficulty seems to be asm/system.h. It holds a number of low level bits with no/few dependencies that are commonly used (eg. memory barriers) and a number of bits with more dependencies that aren't used in many places (eg. switch_to()). These patches break asm/system.h up into the following core pieces: (1) asm/barrier.h Move memory barriers here. This already done for MIPS and Alpha. (2) asm/switch_to.h Move switch_to() and related stuff here. (3) asm/exec.h Move arch_align_stack() here. Other process execution related bits could perhaps go here from asm/processor.h. (4) asm/cmpxchg.h Move xchg() and cmpxchg() here as they're full word atomic ops and frequently used by atomic_xchg() and atomic_cmpxchg(). (5) asm/bug.h Move die() and related bits. (6) asm/auxvec.h Move AT_VECTOR_SIZE_ARCH here. Other arch headers are created as needed on a per-arch basis." Fixed up some conflicts from other header file cleanups and moving code around that has happened in the meantime, so David's testing is somewhat weakened by that. We'll find out anything that got broken and fix it.. * tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits) Delete all instances of asm/system.h Remove all #inclusions of asm/system.h Add #includes needed to permit the removal of asm/system.h Move all declarations of free_initmem() to linux/mm.h Disintegrate asm/system.h for OpenRISC Split arch_align_stack() out from asm-generic/system.h Split the switch_to() wrapper out of asm-generic/system.h Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h Create asm-generic/barrier.h Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h Disintegrate asm/system.h for Xtensa Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt] Disintegrate asm/system.h for Tile Disintegrate asm/system.h for Sparc Disintegrate asm/system.h for SH Disintegrate asm/system.h for Score Disintegrate asm/system.h for S390 Disintegrate asm/system.h for PowerPC Disintegrate asm/system.h for PA-RISC Disintegrate asm/system.h for MN10300 ...
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/ia32/ia32_aout.c	1
-rw-r--r--	arch/x86/include/asm/apic.h	1
-rw-r--r--	arch/x86/include/asm/auxvec.h	7
-rw-r--r--	arch/x86/include/asm/barrier.h	116
-rw-r--r--	arch/x86/include/asm/bug.h	4
-rw-r--r--	arch/x86/include/asm/cacheflush.h	1
-rw-r--r--	arch/x86/include/asm/elf.h	1
-rw-r--r--	arch/x86/include/asm/exec.h	1
-rw-r--r--	arch/x86/include/asm/futex.h	1
-rw-r--r--	arch/x86/include/asm/i387.h	1
-rw-r--r--	arch/x86/include/asm/local.h	1
-rw-r--r--	arch/x86/include/asm/mc146818rtc.h	1
-rw-r--r--	arch/x86/include/asm/page_types.h	1
-rw-r--r--	arch/x86/include/asm/processor.h	31
-rw-r--r--	arch/x86/include/asm/segment.h	58
-rw-r--r--	arch/x86/include/asm/special_insns.h	199
-rw-r--r--	arch/x86/include/asm/stackprotector.h	1
-rw-r--r--	arch/x86/include/asm/switch_to.h	129
-rw-r--r--	arch/x86/include/asm/system.h	523
-rw-r--r--	arch/x86/include/asm/tlbflush.h	2
-rw-r--r--	arch/x86/include/asm/virtext.h	1
-rw-r--r--	arch/x86/kernel/acpi/cstate.c	1
-rw-r--r--	arch/x86/kernel/apm_32.c	1
-rw-r--r--	arch/x86/kernel/cpu/mcheck/p5.c	1
-rw-r--r--	arch/x86/kernel/cpu/mcheck/therm_throt.c	1
-rw-r--r--	arch/x86/kernel/cpu/mcheck/winchip.c	1
-rw-r--r--	arch/x86/kernel/cpu/mtrr/generic.c	1
-rw-r--r--	arch/x86/kernel/cpuid.c	1
-rw-r--r--	arch/x86/kernel/i8259.c	1
-rw-r--r--	arch/x86/kernel/irqinit.c	1
-rw-r--r--	arch/x86/kernel/kgdb.c	1
-rw-r--r--	arch/x86/kernel/ldt.c	1
-rw-r--r--	arch/x86/kernel/machine_kexec_32.c	1
-rw-r--r--	arch/x86/kernel/mca_32.c	1
-rw-r--r--	arch/x86/kernel/module.c	1
-rw-r--r--	arch/x86/kernel/msr.c	1
-rw-r--r--	arch/x86/kernel/paravirt.c	1
-rw-r--r--	arch/x86/kernel/pci-calgary_64.c	1
-rw-r--r--	arch/x86/kernel/process.c	1
-rw-r--r--	arch/x86/kernel/process_32.c	2
-rw-r--r--	arch/x86/kernel/process_64.c	2
-rw-r--r--	arch/x86/kernel/ptrace.c	1
-rw-r--r--	arch/x86/kernel/setup.c	1
-rw-r--r--	arch/x86/kernel/tce_64.c	1
-rw-r--r--	arch/x86/kernel/tls.c	1
-rw-r--r--	arch/x86/kernel/traps.c	1
-rw-r--r--	arch/x86/mm/init.c	1
-rw-r--r--	arch/x86/mm/init_32.c	1
-rw-r--r--	arch/x86/mm/init_64.c	1
-rw-r--r--	arch/x86/mm/pgtable_32.c	1
-rw-r--r--	arch/x86/power/hibernate_32.c	1
51 files changed, 549 insertions, 564 deletions
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 4c2e59a420b9..d511d951a052 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -26,7 +26,6 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a9371c91718c..4b2caeefe1a2 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -11,7 +11,6 @@
 #include <linux/atomic.h>
 #include <asm/fixmap.h>
 #include <asm/mpspec.h>
-#include <asm/system.h>
 #include <asm/msr.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3	1
diff --git a/arch/x86/include/asm/auxvec.h b/arch/x86/include/asm/auxvec.h
index 1316b4c35425..77203ac352de 100644
--- a/arch/x86/include/asm/auxvec.h
+++ b/arch/x86/include/asm/auxvec.h
@@ -9,4 +9,11 @@
 #endif
 #define AT_SYSINFO_EHDR		33
 
+/* entries in ARCH_DLINFO: */
+#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
+# define AT_VECTOR_SIZE_ARCH 2
+#else /* else it's non-compat x86-64 */
+# define AT_VECTOR_SIZE_ARCH 1
+#endif
+
 #endif /* _ASM_X86_AUXVEC_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
new file mode 100644
index 000000000000..c6cd358a1eec
--- /dev/null
+++ b/arch/x86/include/asm/barrier.h
@@ -0,0 +1,116 @@
+#ifndef _ASM_X86_BARRIER_H
+#define _ASM_X86_BARRIER_H
+
+#include <asm/alternative.h>
+#include <asm/nops.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#ifdef CONFIG_X86_32
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb()	asm volatile("mfence":::"memory")
+#define rmb()	asm volatile("lfence":::"memory")
+#define wmb()	asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb()	rmb()
+#else
+# define smp_rmb()	barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb()	wmb()
+#else
+# define smp_wmb()	barrier()
+#endif
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static __always_inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif /* _ASM_X86_BARRIER_H */
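The smp_*() variants above collapse to compiler barriers on !CONFIG_SMP
builds. A hedged sketch of the store/load pairing they exist for (the names
are invented for illustration; this is not code from the patch):

	static int payload;
	static int ready;

	static void producer(void)		/* runs on CPU 0 */
	{
		payload = 42;
		smp_wmb();		/* order payload store before flag store */
		ready = 1;
	}

	static void consumer(void)		/* runs on CPU 1 */
	{
		while (!ready)
			cpu_relax();
		smp_rmb();		/* pairs with the producer's smp_wmb() */
		BUG_ON(payload != 42);	/* guaranteed by the barrier pair */
	}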
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index f654d1bb17fb..11e1152222d0 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -36,4 +36,8 @@ do { \
 #endif /* !CONFIG_BUG */
 
 #include <asm-generic/bug.h>
+
+
+extern void show_regs_common(void);
+
 #endif /* _ASM_X86_BUG_H */
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 4e12668711e5..9863ee3747da 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -3,6 +3,7 @@
 
 /* Caches aren't brain-dead on the intel. */
 #include <asm-generic/cacheflush.h>
+#include <asm/special_insns.h>
 
 #ifdef CONFIG_X86_PAT
 /*
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5f962df30d0f..f27f79abe021 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -84,7 +84,6 @@ extern unsigned int vdso_enabled;
 	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
 
 #include <asm/processor.h>
-#include <asm/system.h>
 
 #ifdef CONFIG_X86_32
 #include <asm/desc.h>
diff --git a/arch/x86/include/asm/exec.h b/arch/x86/include/asm/exec.h
new file mode 100644
index 000000000000..54c2e1db274a
--- /dev/null
+++ b/arch/x86/include/asm/exec.h
@@ -0,0 +1 @@
+/* define arch_align_stack() here */
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index d09bb03653f0..71ecbcba1a4e 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -9,7 +9,6 @@
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/processor.h>
-#include <asm/system.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
 	asm volatile("1:\t" insn "\n"				\
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 7ce0798b1b26..257d9cca214f 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -14,7 +14,6 @@
 
 #include <linux/sched.h>
 #include <linux/hardirq.h>
-#include <asm/system.h>
 
 struct pt_regs;
 struct user_i387_struct;
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 9cdae5d47e8f..c8bed0da434a 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -3,7 +3,6 @@
 
 #include <linux/percpu.h>
 
-#include <asm/system.h>
 #include <linux/atomic.h>
 #include <asm/asm.h>
 
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index 0e8e85bb7c51..d354fb781c57 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -5,7 +5,6 @@
 #define _ASM_X86_MC146818RTC_H
 
 #include <asm/io.h>
-#include <asm/system.h>
 #include <asm/processor.h>
 #include <linux/mc146818rtc.h>
 
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index bce688d54c12..e21fdd10479f 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -55,7 +55,6 @@ extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 
 extern void initmem_init(void);
-extern void free_initmem(void);
 
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5533b30cac07..a19542c1685e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -14,13 +14,13 @@ struct mm_struct;
 #include <asm/sigcontext.h>
 #include <asm/current.h>
 #include <asm/cpufeature.h>
-#include <asm/system.h>
 #include <asm/page.h>
 #include <asm/pgtable_types.h>
 #include <asm/percpu.h>
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+#include <asm/special_insns.h>
 
 #include <linux/personality.h>
 #include <linux/cpumask.h>
@@ -29,6 +29,15 @@ struct mm_struct;
 #include <linux/math64.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/irqflags.h>
+
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be quite expensive on some Nehalem processors.
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN	0
 
 #define HBP_NUM 4
 /*
@@ -959,4 +968,24 @@ extern bool cpu_has_amd_erratum(const int *);
 #define cpu_has_amd_erratum(x)	(false)
 #endif /* CONFIG_CPU_SUP_AMD */
 
+#ifdef CONFIG_X86_32
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+#endif
+
+void disable_hlt(void);
+void enable_hlt(void);
+
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
+bool set_pm_idle_to_default(void);
+
+void stop_this_cpu(void *dummy);
+
 #endif /* _ASM_X86_PROCESSOR_H */
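The disable_hlt()/enable_hlt() pair (guarded by HAVE_DISABLE_HLT above,
32-bit only) lets a driver keep the idle loop from executing HLT while
hardware that cannot tolerate it is mid-transfer; the floppy driver is the
traditional caller. A hedged sketch of the pattern (do_polled_transfer() is
a made-up placeholder):

	static void critical_io(void)
	{
		disable_hlt();		/* idle loop busy-waits instead of HLTing */
		do_polled_transfer();	/* hypothetical I/O that HLT would disturb */
		enable_hlt();
	}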
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 5e641715c3fe..165466233ab0 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -212,7 +212,61 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
-#endif
-#endif
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg, value)						\
+do {									\
+	unsigned short __val = (value);					\
+									\
+	asm volatile("						\n"	\
+		     "1:	movl %k0,%%" #seg "		\n"	\
+									\
+		     ".section .fixup,\"ax\"			\n"	\
+		     "2:	xorl %k0,%k0			\n"	\
+		     "		jmp 1b				\n"	\
+		     ".previous					\n"	\
+									\
+		     _ASM_EXTABLE(1b, 2b)				\
+									\
+		     : "+r" (__val) : : "memory");			\
+} while (0)
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value)				\
+	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
+
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk)	((tsk)->thread.gs)
+#define lazy_save_gs(v)		savesegment(gs, (v))
+#define lazy_load_gs(v)		loadsegment(gs, (v))
+#else	/* X86_32_LAZY_GS */
+#define get_user_gs(regs)	(u16)((regs)->gs)
+#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v)		do { } while (0)
+#define lazy_load_gs(v)		do { } while (0)
+#endif	/* X86_32_LAZY_GS */
+#endif	/* X86_32 */
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+	return __limit + 1;
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SEGMENT_H */
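loadsegment() recovers from a bad selector via the exception-table fixup,
loading selector 0 instead of oopsing. A hedged usage sketch modelled on the
kind of save/restore the context-switch code does (swap_fs() is invented for
illustration):

	static void swap_fs(unsigned short new_sel)
	{
		unsigned short old_sel;

		savesegment(fs, old_sel);	/* read the live %fs selector */
		loadsegment(fs, new_sel);	/* a faulting selector degrades to 0 */
		/* ... work with the new mapping ... */
		loadsegment(fs, old_sel);	/* restore */
	}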
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
new file mode 100644
index 000000000000..41fc93a2e225
--- /dev/null
+++ b/arch/x86/include/asm/special_insns.h
@@ -0,0 +1,199 @@
+#ifndef _ASM_X86_SPECIAL_INSNS_H
+#define _ASM_X86_SPECIAL_INSNS_H
+
+
+#ifdef __KERNEL__
+
+static inline void native_clts(void)
+{
+	asm volatile("clts");
+}
+
+/*
+ * Volatile isn't enough to prevent the compiler from reordering the
+ * read/write functions for the control registers and messing everything up.
+ * A memory clobber would solve the problem, but would prevent reordering of
+ * all loads stores around it, which can hurt performance. Solution is to
+ * use a variable and mimic reads and writes to it to enforce serialization
+ */
+static unsigned long __force_order;
+
+static inline unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline unsigned long native_read_cr4_safe(void)
+{
+	unsigned long val;
+	/* This could fault if %cr4 does not exist. In x86_64, a cr4 always
+	 * exists, so it will never fail. */
+#ifdef CONFIG_X86_32
+	asm volatile("1: mov %%cr4, %0\n"
+		     "2:\n"
+		     _ASM_EXTABLE(1b, 2b)
+		     : "=r" (val), "=m" (__force_order) : "0" (0));
+#else
+	val = native_read_cr4();
+#endif
+	return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
+}
+
+#ifdef CONFIG_X86_64
+static inline unsigned long native_read_cr8(void)
+{
+	unsigned long cr8;
+	asm volatile("movq %%cr8,%0" : "=r" (cr8));
+	return cr8;
+}
+
+static inline void native_write_cr8(unsigned long val)
+{
+	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
+}
+#endif
+
+static inline void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+
+extern void native_load_gs_index(unsigned);
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
+static inline unsigned long read_cr0(void)
+{
+	return native_read_cr0();
+}
+
+static inline void write_cr0(unsigned long x)
+{
+	native_write_cr0(x);
+}
+
+static inline unsigned long read_cr2(void)
+{
+	return native_read_cr2();
+}
+
+static inline void write_cr2(unsigned long x)
+{
+	native_write_cr2(x);
+}
+
+static inline unsigned long read_cr3(void)
+{
+	return native_read_cr3();
+}
+
+static inline void write_cr3(unsigned long x)
+{
+	native_write_cr3(x);
+}
+
+static inline unsigned long read_cr4(void)
+{
+	return native_read_cr4();
+}
+
+static inline unsigned long read_cr4_safe(void)
+{
+	return native_read_cr4_safe();
+}
+
+static inline void write_cr4(unsigned long x)
+{
+	native_write_cr4(x);
+}
+
+static inline void wbinvd(void)
+{
+	native_wbinvd();
+}
+
+#ifdef CONFIG_X86_64
+
+static inline unsigned long read_cr8(void)
+{
+	return native_read_cr8();
+}
+
+static inline void write_cr8(unsigned long x)
+{
+	native_write_cr8(x);
+}
+
+static inline void load_gs_index(unsigned selector)
+{
+	native_load_gs_index(selector);
+}
+
+#endif
+
+/* Clear the 'TS' bit */
+static inline void clts(void)
+{
+	native_clts();
+}
+
+#endif/* CONFIG_PARAVIRT */
+
+#define stts() write_cr0(read_cr0() | X86_CR0_TS)
+
+static inline void clflush(volatile void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
+}
+
+#define nop() asm volatile ("nop")
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_SPECIAL_INSNS_H */
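read_cr0()/write_cr0() above serialize through the __force_order dummy
variable rather than a full memory clobber. A hedged sketch of the kind of
sequence a caller such as the MTRR code performs with them (simplified for
illustration; not the actual mtrr/generic.c body):

	static void caches_off(void)
	{
		unsigned long cr0 = read_cr0() | X86_CR0_CD;	/* set cache-disable */

		write_cr0(cr0);
		wbinvd();		/* flush the now-disabled caches */
	}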
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 157517763565..b5d9533d2c38 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -38,7 +38,6 @@
 #include <asm/tsc.h>
 #include <asm/processor.h>
 #include <asm/percpu.h>
-#include <asm/system.h>
 #include <asm/desc.h>
 #include <linux/random.h>
 
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
new file mode 100644
index 000000000000..4ec45b3abba1
--- /dev/null
+++ b/arch/x86/include/asm/switch_to.h
@@ -0,0 +1,129 @@
+#ifndef _ASM_X86_SWITCH_TO_H
+#define _ASM_X86_SWITCH_TO_H
+
+struct task_struct; /* one of the stranger aspects of C forward declarations */
+struct task_struct *__switch_to(struct task_struct *prev,
+				struct task_struct *next);
+struct tss_struct;
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+		      struct tss_struct *tss);
+
+#ifdef CONFIG_X86_32
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary						\
+	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
+	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+#define __switch_canary_oparam					\
+	, [stack_canary] "=m" (stack_canary.canary)
+#define __switch_canary_iparam					\
+	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else	/* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif	/* CC_STACKPROTECTOR */
+
+/*
+ * Saving eflags is important. It switches not only IOPL between tasks,
+ * it also protects other tasks from NT leaking through sysenter etc.
+ */
+#define switch_to(prev, next, last)					\
+do {									\
+	/*								\
+	 * Context-switching clobbers all registers, so we clobber	\
+	 * them explicitly, via unused output variables.		\
+	 * (EAX and EBP is not listed because EBP is saved/restored	\
+	 * explicitly for wchan access and EAX is the return value of	\
+	 * __switch_to())						\
+	 */								\
+	unsigned long ebx, ecx, edx, esi, edi;				\
+									\
+	asm volatile("pushfl\n\t"		/* save flags */	\
+		     "pushl %%ebp\n\t"		/* save EBP */		\
+		     "movl %%esp,%[prev_sp]\n\t" /* save ESP */		\
+		     "movl %[next_sp],%%esp\n\t" /* restore ESP */	\
+		     "movl $1f,%[prev_ip]\n\t"	/* save EIP */		\
+		     "pushl %[next_ip]\n\t"	/* restore EIP */	\
+		     __switch_canary					\
+		     "jmp __switch_to\n"	/* regparm call */	\
+		     "1:\t"						\
+		     "popl %%ebp\n\t"		/* restore EBP */	\
+		     "popfl\n"			/* restore flags */	\
+									\
+		     /* output parameters */				\
+		     : [prev_sp] "=m" (prev->thread.sp),		\
+		       [prev_ip] "=m" (prev->thread.ip),		\
+		       "=a" (last),					\
+									\
+		       /* clobbered output registers: */		\
+		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
+		       "=S" (esi), "=D" (edi)				\
+									\
+		       __switch_canary_oparam				\
+									\
+		       /* input parameters: */				\
+		     : [next_sp]  "m" (next->thread.sp),		\
+		       [next_ip]  "m" (next->thread.ip),		\
+									\
+		       /* regparm parameters for __switch_to(): */	\
+		       [prev]     "a" (prev),				\
+		       [next]     "d" (next)				\
+									\
+		       __switch_canary_iparam				\
+									\
+		     : /* reloaded segment registers */			\
+			"memory");					\
+} while (0)
+
+#else /* CONFIG_X86_32 */
+
+/* frame pointer must be last for get_wchan */
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+
+#define __EXTRA_CLOBBER  \
+	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
+	  "r12", "r13", "r14", "r15"
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary						  \
+	"movq %P[task_canary](%%rsi),%%r8\n\t"			  \
+	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define __switch_canary_oparam					  \
+	, [gs_canary] "=m" (irq_stack_union.stack_canary)
+#define __switch_canary_iparam					  \
+	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else	/* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif	/* CC_STACKPROTECTOR */
+
+/* Save restore flags to clear handle leaking NT */
+#define switch_to(prev, next, last) \
+	asm volatile(SAVE_CONTEXT					  \
+	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
+	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
+	     "call __switch_to\n\t"					  \
+	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
+	     __switch_canary						  \
+	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
+	     "movq %%rax,%%rdi\n\t"					  \
+	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
+	     "jnz ret_from_fork\n\t"					  \
+	     RESTORE_CONTEXT						  \
+	     : "=a" (last)						  \
+	       __switch_canary_oparam					  \
+	     : [next] "S" (next), [prev] "D" (prev),			  \
+	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
+	       [_tif_fork] "i" (_TIF_FORK),				  \
+	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
+	       [current_task] "m" (current_task)			  \
+	       __switch_canary_iparam					  \
+	     : "memory", "cc" __EXTRA_CLOBBER)
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* _ASM_X86_SWITCH_TO_H */
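switch_to() takes three parameters because the stack switch means the macro
"returns" on the new task's stack: 'last' tells the resumed task which task
the CPU switched away from. Roughly how the scheduler core of this era
invokes it (a hedged sketch of context_switch(), details elided):

	/* inside context_switch(), after the mm has been switched: */
	switch_to(prev, next, prev);	/* on return, prev = task we came from */
	barrier();			/* keep the compiler from caching stale values */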
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
deleted file mode 100644
index 2d2f01ce6dcb..000000000000
--- a/arch/x86/include/asm/system.h
+++ /dev/null
@@ -1,523 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_H
-#define _ASM_X86_SYSTEM_H
-
-#include <asm/asm.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-#include <asm/nops.h>
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-/* entries in ARCH_DLINFO: */
-#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
-# define AT_VECTOR_SIZE_ARCH 2
-#else /* else it's non-compat x86-64 */
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-struct task_struct; /* one of the stranger aspects of C forward declarations */
-struct task_struct *__switch_to(struct task_struct *prev,
-				struct task_struct *next);
-struct tss_struct;
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss);
-extern void show_regs_common(void);
-
-#ifdef CONFIG_X86_32
-
-#ifdef CONFIG_CC_STACKPROTECTOR
-#define __switch_canary						\
-	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
-	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
-#define __switch_canary_oparam					\
-	, [stack_canary] "=m" (stack_canary.canary)
-#define __switch_canary_iparam					\
-	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
-#else	/* CC_STACKPROTECTOR */
-#define __switch_canary
-#define __switch_canary_oparam
-#define __switch_canary_iparam
-#endif	/* CC_STACKPROTECTOR */
-
-/*
- * Saving eflags is important. It switches not only IOPL between tasks,
- * it also protects other tasks from NT leaking through sysenter etc.
- */
-#define switch_to(prev, next, last)					\
-do {									\
-	/*								\
-	 * Context-switching clobbers all registers, so we clobber	\
-	 * them explicitly, via unused output variables.		\
-	 * (EAX and EBP is not listed because EBP is saved/restored	\
-	 * explicitly for wchan access and EAX is the return value of	\
-	 * __switch_to())						\
-	 */								\
-	unsigned long ebx, ecx, edx, esi, edi;				\
-									\
-	asm volatile("pushfl\n\t"		/* save flags */	\
-		     "pushl %%ebp\n\t"		/* save EBP */		\
-		     "movl %%esp,%[prev_sp]\n\t" /* save ESP */		\
-		     "movl %[next_sp],%%esp\n\t" /* restore ESP */	\
-		     "movl $1f,%[prev_ip]\n\t"	/* save EIP */		\
-		     "pushl %[next_ip]\n\t"	/* restore EIP */	\
-		     __switch_canary					\
-		     "jmp __switch_to\n"	/* regparm call */	\
-		     "1:\t"						\
-		     "popl %%ebp\n\t"		/* restore EBP */	\
-		     "popfl\n"			/* restore flags */	\
-									\
-		     /* output parameters */				\
-		     : [prev_sp] "=m" (prev->thread.sp),		\
-		       [prev_ip] "=m" (prev->thread.ip),		\
-		       "=a" (last),					\
-									\
-		       /* clobbered output registers: */		\
-		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
-		       "=S" (esi), "=D" (edi)				\
-									\
-		       __switch_canary_oparam				\
-									\
-		       /* input parameters: */				\
-		     : [next_sp]  "m" (next->thread.sp),		\
-		       [next_ip]  "m" (next->thread.ip),		\
-									\
-		       /* regparm parameters for __switch_to(): */	\
-		       [prev]     "a" (prev),				\
-		       [next]     "d" (next)				\
-									\
-		       __switch_canary_iparam				\
-									\
-		     : /* reloaded segment registers */			\
-			"memory");					\
-} while (0)
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-#else
-
-/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
-
-#define __EXTRA_CLOBBER  \
-	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15"
-
-#ifdef CONFIG_CC_STACKPROTECTOR
-#define __switch_canary						  \
-	"movq %P[task_canary](%%rsi),%%r8\n\t"			  \
-	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
-#define __switch_canary_oparam					  \
-	, [gs_canary] "=m" (irq_stack_union.stack_canary)
-#define __switch_canary_iparam					  \
-	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
-#else	/* CC_STACKPROTECTOR */
-#define __switch_canary
-#define __switch_canary_oparam
-#define __switch_canary_iparam
-#endif	/* CC_STACKPROTECTOR */
-
-/* Save restore flags to clear handle leaking NT */
-#define switch_to(prev, next, last) \
-	asm volatile(SAVE_CONTEXT					  \
-	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
-	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
-	     "call __switch_to\n\t"					  \
-	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
-	     __switch_canary						  \
-	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
-	     "movq %%rax,%%rdi\n\t"					  \
-	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
-	     "jnz ret_from_fork\n\t"					  \
-	     RESTORE_CONTEXT						  \
-	     : "=a" (last)						  \
-	       __switch_canary_oparam					  \
-	     : [next] "S" (next), [prev] "D" (prev),			  \
-	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
-	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
-	       [_tif_fork] "i" (_TIF_FORK),				  \
-	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
-	       [current_task] "m" (current_task)			  \
-	       __switch_canary_iparam					  \
-	     : "memory", "cc" __EXTRA_CLOBBER)
-#endif
-
-#ifdef __KERNEL__
-
-extern void native_load_gs_index(unsigned);
-
-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg, value)						\
-do {									\
-	unsigned short __val = (value);					\
-									\
-	asm volatile("						\n"	\
-		     "1:	movl %k0,%%" #seg "		\n"	\
-									\
-		     ".section .fixup,\"ax\"			\n"	\
-		     "2:	xorl %k0,%k0			\n"	\
-		     "		jmp 1b				\n"	\
-		     ".previous					\n"	\
-									\
-		     _ASM_EXTABLE(1b, 2b)				\
-									\
-		     : "+r" (__val) : : "memory");			\
-} while (0)
-
-/*
- * Save a segment register away
- */
-#define savesegment(seg, value)				\
-	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
-
-/*
- * x86_32 user gs accessors.
- */
-#ifdef CONFIG_X86_32
-#ifdef CONFIG_X86_32_LAZY_GS
-#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
-#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
-#define task_user_gs(tsk)	((tsk)->thread.gs)
-#define lazy_save_gs(v)		savesegment(gs, (v))
-#define lazy_load_gs(v)		loadsegment(gs, (v))
-#else	/* X86_32_LAZY_GS */
-#define get_user_gs(regs)	(u16)((regs)->gs)
-#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
-#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
-#define lazy_save_gs(v)		do { } while (0)
-#define lazy_load_gs(v)		do { } while (0)
-#endif	/* X86_32_LAZY_GS */
-#endif	/* X86_32 */
-
-static inline unsigned long get_limit(unsigned long segment)
-{
-	unsigned long __limit;
-	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
-	return __limit + 1;
-}
-
-static inline void native_clts(void)
-{
-	asm volatile("clts");
-}
-
-/*
- * Volatile isn't enough to prevent the compiler from reordering the
- * read/write functions for the control registers and messing everything up.
- * A memory clobber would solve the problem, but would prevent reordering of
- * all loads stores around it, which can hurt performance. Solution is to
- * use a variable and mimic reads and writes to it to enforce serialization
- */
-static unsigned long __force_order;
-
-static inline unsigned long native_read_cr0(void)
-{
-	unsigned long val;
-	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
-static inline void native_write_cr0(unsigned long val)
-{
-	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
-}
-
-static inline unsigned long native_read_cr2(void)
-{
-	unsigned long val;
-	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
-static inline void native_write_cr2(unsigned long val)
-{
-	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
-}
-
-static inline unsigned long native_read_cr3(void)
-{
-	unsigned long val;
-	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
-static inline void native_write_cr3(unsigned long val)
-{
-	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
-}
-
-static inline unsigned long native_read_cr4(void)
-{
-	unsigned long val;
-	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
-static inline unsigned long native_read_cr4_safe(void)
-{
-	unsigned long val;
-	/* This could fault if %cr4 does not exist. In x86_64, a cr4 always
-	 * exists, so it will never fail. */
-#ifdef CONFIG_X86_32
-	asm volatile("1: mov %%cr4, %0\n"
-		     "2:\n"
-		     _ASM_EXTABLE(1b, 2b)
-		     : "=r" (val), "=m" (__force_order) : "0" (0));
-#else
-	val = native_read_cr4();
-#endif
-	return val;
-}
-
-static inline void native_write_cr4(unsigned long val)
-{
-	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
-}
-
-#ifdef CONFIG_X86_64
-static inline unsigned long native_read_cr8(void)
-{
-	unsigned long cr8;
-	asm volatile("movq %%cr8,%0" : "=r" (cr8));
-	return cr8;
-}
-
-static inline void native_write_cr8(unsigned long val)
-{
-	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-#endif
-
-static inline void native_wbinvd(void)
-{
-	asm volatile("wbinvd": : :"memory");
-}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-
-static inline unsigned long read_cr0(void)
-{
-	return native_read_cr0();
-}
-
-static inline void write_cr0(unsigned long x)
-{
-	native_write_cr0(x);
-}
-
-static inline unsigned long read_cr2(void)
-{
-	return native_read_cr2();
-}
-
-static inline void write_cr2(unsigned long x)
-{
-	native_write_cr2(x);
-}
-
-static inline unsigned long read_cr3(void)
-{
-	return native_read_cr3();
-}
-
-static inline void write_cr3(unsigned long x)
-{
-	native_write_cr3(x);
-}
-
-static inline unsigned long read_cr4(void)
-{
-	return native_read_cr4();
-}
-
-static inline unsigned long read_cr4_safe(void)
-{
-	return native_read_cr4_safe();
-}
-
-static inline void write_cr4(unsigned long x)
-{
-	native_write_cr4(x);
-}
-
-static inline void wbinvd(void)
-{
-	native_wbinvd();
-}
-
-#ifdef CONFIG_X86_64
-
-static inline unsigned long read_cr8(void)
-{
-	return native_read_cr8();
-}
-
-static inline void write_cr8(unsigned long x)
-{
-	native_write_cr8(x);
-}
-
-static inline void load_gs_index(unsigned selector)
-{
-	native_load_gs_index(selector);
-}
-
-#endif
-
-/* Clear the 'TS' bit */
-static inline void clts(void)
-{
-	native_clts();
-}
-
-#endif/* CONFIG_PARAVIRT */
-
-#define stts() write_cr0(read_cr0() | X86_CR0_TS)
-
-#endif /* __KERNEL__ */
-
-static inline void clflush(volatile void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
-}
-
-#define nop() asm volatile ("nop")
-
-void disable_hlt(void);
-void enable_hlt(void);
-
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
-void default_idle(void);
-bool set_pm_idle_to_default(void);
-
-void stop_this_cpu(void *dummy);
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb()	asm volatile("mfence":::"memory")
-#define rmb()	asm volatile("lfence":::"memory")
-#define wmb()	asm volatile("sfence" ::: "memory")
-#endif
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb()	rmb()
-#else
-# define smp_rmb()	barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb()	wmb()
-#else
-# define smp_wmb()	barrier()
-#endif
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static __always_inline void rdtsc_barrier(void)
-{
-	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
-	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
-/*
- * We handle most unaligned accesses in hardware. On the other hand
- * unaligned DMA can be quite expensive on some Nehalem processors.
- *
- * Based on this we disable the IP header alignment in network drivers.
- */
-#define NET_IP_ALIGN	0
-#endif /* _ASM_X86_SYSTEM_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 169be8938b96..c0e108e08079 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,7 +5,7 @@
 #include <linux/sched.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/special_insns.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index e0f9aa16358b..5da71c27cc59 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -16,7 +16,6 @@
 #define _ASM_X86_VIRTEX_H
 
 #include <asm/processor.h>
-#include <asm/system.h>
 
 #include <asm/vmx.h>
 #include <asm/svm.h>
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index f50e7fb2a201..d2b7f27781bc 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -14,6 +14,7 @@
 #include <acpi/processor.h>
 #include <asm/acpi.h>
 #include <asm/mwait.h>
+#include <asm/special_insns.h>
 
 /*
  * Initialize bm_flags based on the CPU cache properties
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 5d56931a15b3..459e78cbf61e 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -231,7 +231,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/i8253.h>
 
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/olpc.h>
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 5c0e6533d9bc..2d5454cd2c4f 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -9,7 +9,6 @@
 #include <linux/smp.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 67bb17a37a0a..47a1870279aa 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -25,7 +25,6 @@
 #include <linux/cpu.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/idle.h>
 #include <asm/mce.h>
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 54060f565974..2d7998fb628c 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -8,7 +8,6 @@
 #include <linux/init.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 97b26356e9ee..75772ae6c65f 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -12,7 +12,6 @@
 #include <asm/processor-flags.h>
 #include <asm/cpufeature.h>
 #include <asm/tlbflush.h>
-#include <asm/system.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
 #include <asm/pat.h>
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index a524353d93f2..39472dd2323f 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -43,7 +43,6 @@
 
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/system.h>
 
 static struct class *cpuid_class;
 
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 610485223bdb..36d1853e91af 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -15,7 +15,6 @@
 #include <linux/delay.h>
 
 #include <linux/atomic.h>
-#include <asm/system.h>
 #include <asm/timer.h>
 #include <asm/hw_irq.h>
 #include <asm/pgtable.h>
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 43e2b1cff0a7..6d5fc8cfd5d6 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -16,7 +16,6 @@
 #include <linux/delay.h>
 
 #include <linux/atomic.h>
-#include <asm/system.h>
 #include <asm/timer.h>
 #include <asm/hw_irq.h>
 #include <asm/pgtable.h>
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index fdc37b3d0ce3..db6720edfdd0 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -46,7 +46,6 @@
 
 #include <asm/debugreg.h>
 #include <asm/apicdef.h>
-#include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ea697263b373..ebc987398923 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -15,7 +15,6 @@
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 
-#include <asm/system.h>
 #include <asm/ldt.h>
 #include <asm/desc.h>
 #include <asm/mmu_context.h>
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index a3fa43ba5d3b..5b19e4d78b00 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -23,7 +23,6 @@
 #include <asm/apic.h>
 #include <asm/cpufeature.h>
 #include <asm/desc.h>
-#include <asm/system.h>
 #include <asm/cacheflush.h>
 #include <asm/debugreg.h>
 
diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c
index 177183cbb6ae..7eb1e2b97827 100644
--- a/arch/x86/kernel/mca_32.c
+++ b/arch/x86/kernel/mca_32.c
@@ -43,7 +43,6 @@
 #include <linux/mca.h>
 #include <linux/kprobes.h>
 #include <linux/slab.h>
-#include <asm/system.h>
 #include <asm/io.h>
 #include <linux/proc_fs.h>
 #include <linux/mman.h>
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 925179f871de..f21fd94ac897 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -26,7 +26,6 @@
 #include <linux/gfp.h>
 #include <linux/jump_label.h>
 
-#include <asm/system.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 96356762a51d..eb113693f043 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -40,7 +40,6 @@
 
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/system.h>
 
 static struct class *msr_class;
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 9c57c02e54f6..ab137605e694 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -38,6 +38,7 @@
 #include <asm/apic.h>
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
+#include <asm/special_insns.h>
 
 /* nop stub */
 void _paravirt_nop(void)
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 726494b58345..6ac5782f4d6b 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -42,7 +42,6 @@
 #include <asm/calgary.h>
 #include <asm/tce.h>
 #include <asm/pci-direct.h>
-#include <asm/system.h>
 #include <asm/dma.h>
 #include <asm/rio.h>
 #include <asm/bios_ebda.h>
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14baf78d5a1f..9b24f36eb55f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -15,7 +15,6 @@
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <asm/cpu.h>
-#include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
 #include <asm/idle.h>
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9d7d4842bfaf..aae4f4bbbe88 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -41,7 +41,6 @@
 #include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/ldt.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
@@ -59,6 +58,7 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
+#include <asm/switch_to.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 292da13fc5aa..61270e8d428a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -40,7 +40,6 @@
 #include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
@@ -53,6 +52,7 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
+#include <asm/switch_to.h>
 
 asmlinkage extern void ret_from_fork(void);
 
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 78f05e438be5..8a634c887652 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -24,7 +24,6 @@
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 88638883176a..8cbeb7209c3e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -90,7 +90,6 @@
 #include <asm/processor.h>
 #include <asm/bugs.h>
 
-#include <asm/system.h>
 #include <asm/vsyscall.h>
 #include <asm/cpu.h>
 #include <asm/desc.h>
diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
index 9e540fee7009..ab40954e113e 100644
--- a/arch/x86/kernel/tce_64.c
+++ b/arch/x86/kernel/tce_64.c
@@ -34,6 +34,7 @@
 #include <asm/tce.h>
 #include <asm/calgary.h>
 #include <asm/proto.h>
+#include <asm/cacheflush.h>
 
 /* flush a tce at 'tceaddr' to main memory */
 static inline void flush_tce(void* tceaddr)
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 6bb7b8579e70..73920e4c6dc5 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -6,7 +6,6 @@
 
 #include <asm/uaccess.h>
 #include <asm/desc.h>
-#include <asm/system.h>
 #include <asm/ldt.h>
 #include <asm/processor.h>
 #include <asm/proto.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ec61d4c1b93b..860f126ca233 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -50,7 +50,6 @@
 #include <asm/processor.h>
 #include <asm/debugreg.h>
 #include <linux/atomic.h>
-#include <asm/system.h>
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/i387.h>
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 6cabf6570d64..4f0cec7e4ffb 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -12,7 +12,6 @@
 #include <asm/page_types.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/system.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 #include <asm/proto.h>
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8663f6c47ccb..575d86f85ce4 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -35,7 +35,6 @@
 #include <asm/asm.h>
 #include <asm/bios_ebda.h>
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/dma.h>
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 436a0309db33..fc18be0f6f29 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -35,7 +35,6 @@
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index cac718499256..a69bcb8c7621 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -10,7 +10,6 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
 
-#include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 3769079874d8..74202c1910cd 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -10,7 +10,6 @@
 #include <linux/suspend.h>
 #include <linux/bootmem.h>
 
-#include <asm/system.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmzone.h>