author     Linus Torvalds <torvalds@linux-foundation.org>   2012-03-28 18:58:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-28 18:58:21 -0400
commit     0195c00244dc2e9f522475868fa278c473ba7339 (patch)
tree       f97ca98ae64ede2c33ad3de05ed7bbfa4f4495ed  /arch/x86/include
parent     f21ce8f8447c8be8847dadcfdbcc76b0d7365fa5 (diff)
parent     141124c02059eee9dbc5c86ea797b1ca888e77f7 (diff)
Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system
Pull "Disintegrate and delete asm/system.h" from David Howells:
"Here are a bunch of patches to disintegrate asm/system.h into a set of
separate bits to relieve the problem of circular inclusion
dependencies.
I've built all the working defconfigs from all the arches that I can
and made sure that they don't break.
The reason for these patches is that I recently encountered a circular
dependency problem that came about when I produced some patches to
optimise get_order() by rewriting it to use ilog2().
This uses bitops - and on the SH arch asm/bitops.h drags in
asm-generic/getorder.h by a circuitous route involving asm/system.h.
The main difficulty seems to be asm/system.h. It holds a number of
low level bits with no/few dependencies that are commonly used (eg.
memory barriers) and a number of bits with more dependencies that
aren't used in many places (eg. switch_to()).
These patches break asm/system.h up into the following core pieces:
(1) asm/barrier.h
Move memory barriers here. This is already done for MIPS and Alpha.
(2) asm/switch_to.h
Move switch_to() and related stuff here.
(3) asm/exec.h
Move arch_align_stack() here. Other process execution related bits
could perhaps go here from asm/processor.h.
(4) asm/cmpxchg.h
Move xchg() and cmpxchg() here as they're full word atomic ops and
frequently used by atomic_xchg() and atomic_cmpxchg().
(5) asm/bug.h
Move die() and related bits.
(6) asm/auxvec.h
Move AT_VECTOR_SIZE_ARCH here.
Other arch headers are created as needed on a per-arch basis."
Fixed up some conflicts from other header file cleanups and code movement
that has happened in the meantime, so David's testing is somewhat weakened
by that. We'll find out about anything that got broken and fix it..
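To make the effect of the split concrete, here is a hedged, illustrative sketch of what a user of the old header looks like before and after this series (the exact set of replacement includes depends on what the file actually uses; this snippet is an assumption, not taken from any specific patch in the pull):

/* Before the split: one catch-all include. */
#include <asm/system.h>		/* barriers, xchg()/cmpxchg(), switch_to(), ... */

/* After the split: include only the pieces that are actually used. */
#include <asm/barrier.h>	/* mb(), rmb(), wmb() and the smp_*() variants */
#include <asm/cmpxchg.h>	/* xchg(), cmpxchg() */
#include <asm/switch_to.h>	/* switch_to() and __switch_to() */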
* tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits)
Delete all instances of asm/system.h
Remove all #inclusions of asm/system.h
Add #includes needed to permit the removal of asm/system.h
Move all declarations of free_initmem() to linux/mm.h
Disintegrate asm/system.h for OpenRISC
Split arch_align_stack() out from asm-generic/system.h
Split the switch_to() wrapper out of asm-generic/system.h
Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h
Create asm-generic/barrier.h
Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h
Disintegrate asm/system.h for Xtensa
Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt]
Disintegrate asm/system.h for Tile
Disintegrate asm/system.h for Sparc
Disintegrate asm/system.h for SH
Disintegrate asm/system.h for Score
Disintegrate asm/system.h for S390
Disintegrate asm/system.h for PowerPC
Disintegrate asm/system.h for PA-RISC
Disintegrate asm/system.h for MN10300
...
Diffstat (limited to 'arch/x86/include')
 arch/x86/include/asm/apic.h            |   1
 arch/x86/include/asm/auxvec.h          |   7
 arch/x86/include/asm/barrier.h         | 116
 arch/x86/include/asm/bug.h             |   4
 arch/x86/include/asm/cacheflush.h      |   1
 arch/x86/include/asm/elf.h             |   1
 arch/x86/include/asm/exec.h            |   1
 arch/x86/include/asm/futex.h           |   1
 arch/x86/include/asm/i387.h            |   1
 arch/x86/include/asm/local.h           |   1
 arch/x86/include/asm/mc146818rtc.h     |   1
 arch/x86/include/asm/page_types.h      |   1
 arch/x86/include/asm/processor.h       |  31
 arch/x86/include/asm/segment.h         |  58
 arch/x86/include/asm/special_insns.h   | 199
 arch/x86/include/asm/stackprotector.h  |   1
 arch/x86/include/asm/switch_to.h       | 129
 arch/x86/include/asm/system.h          | 523
 arch/x86/include/asm/tlbflush.h        |   2
 arch/x86/include/asm/virtext.h         |   1
20 files changed, 544 insertions, 536 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a9371c91718..4b2caeefe1a 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -11,7 +11,6 @@
 #include <linux/atomic.h>
 #include <asm/fixmap.h>
 #include <asm/mpspec.h>
-#include <asm/system.h>
 #include <asm/msr.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3	1
diff --git a/arch/x86/include/asm/auxvec.h b/arch/x86/include/asm/auxvec.h
index 1316b4c3542..77203ac352d 100644
--- a/arch/x86/include/asm/auxvec.h
+++ b/arch/x86/include/asm/auxvec.h
@@ -9,4 +9,11 @@
 #endif
 #define AT_SYSINFO_EHDR		33
 
+/* entries in ARCH_DLINFO: */
+#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
+# define AT_VECTOR_SIZE_ARCH 2
+#else /* else it's non-compat x86-64 */
+# define AT_VECTOR_SIZE_ARCH 1
+#endif
+
 #endif /* _ASM_X86_AUXVEC_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
new file mode 100644
index 00000000000..c6cd358a1ee
--- /dev/null
+++ b/arch/x86/include/asm/barrier.h
@@ -0,0 +1,116 @@
+#ifndef _ASM_X86_BARRIER_H
+#define _ASM_X86_BARRIER_H
+
+#include <asm/alternative.h>
+#include <asm/nops.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#ifdef CONFIG_X86_32
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb()	asm volatile("mfence":::"memory")
+#define rmb()	asm volatile("lfence":::"memory")
+#define wmb()	asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb()	rmb()
+#else
+# define smp_rmb()	barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb()	wmb()
+#else
+# define smp_wmb()	barrier()
+#endif
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static __always_inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif /* _ASM_X86_BARRIER_H */
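The barrier macros keep the semantics they had in asm/system.h; only their home changes. A minimal illustrative sketch of the flag/payload pairing they are used for (assumed example, not from this patch; real code would also use ACCESS_ONCE() or locking):

#include <asm/barrier.h>

static int payload;			/* illustrative shared data */
static int ready;			/* illustrative flag */

static void producer(void)
{
	payload = 42;			/* publish the data first...          */
	smp_wmb();			/* ...order the store before the flag */
	ready = 1;
}

static int consumer(void)
{
	if (!ready)
		return -1;
	smp_rmb();			/* pairs with the producer's smp_wmb() */
	return payload;			/* sees payload == 42 once ready == 1 */
}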
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index f654d1bb17f..11e1152222d 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -36,4 +36,8 @@ do {						\
 #endif /* !CONFIG_BUG */
 
 #include <asm-generic/bug.h>
+
+
+extern void show_regs_common(void);
+
 #endif /* _ASM_X86_BUG_H */
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 4e12668711e..9863ee3747d 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -3,6 +3,7 @@
 
 /* Caches aren't brain-dead on the intel. */
 #include <asm-generic/cacheflush.h>
+#include <asm/special_insns.h>
 
 #ifdef CONFIG_X86_PAT
 /*
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5f962df30d0..f27f79abe02 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -84,7 +84,6 @@ extern unsigned int vdso_enabled;
 	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
 
 #include <asm/processor.h>
-#include <asm/system.h>
 
 #ifdef CONFIG_X86_32
 #include <asm/desc.h>
diff --git a/arch/x86/include/asm/exec.h b/arch/x86/include/asm/exec.h
new file mode 100644
index 00000000000..54c2e1db274
--- /dev/null
+++ b/arch/x86/include/asm/exec.h
@@ -0,0 +1 @@
+/* define arch_align_stack() here */
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index d09bb03653f..71ecbcba1a4 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -9,7 +9,6 @@
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/processor.h>
-#include <asm/system.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
 	asm volatile("1:\t" insn "\n"				\
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 7ce0798b1b2..257d9cca214 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -14,7 +14,6 @@
 
 #include <linux/sched.h>
 #include <linux/hardirq.h>
-#include <asm/system.h>
 
 struct pt_regs;
 struct user_i387_struct;
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 9cdae5d47e8..c8bed0da434 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -3,7 +3,6 @@
 
 #include <linux/percpu.h>
 
-#include <asm/system.h>
 #include <linux/atomic.h>
 #include <asm/asm.h>
 
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index 0e8e85bb7c5..d354fb781c5 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -5,7 +5,6 @@
 #define _ASM_X86_MC146818RTC_H
 
 #include <asm/io.h>
-#include <asm/system.h>
 #include <asm/processor.h>
 #include <linux/mc146818rtc.h>
 
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index bce688d54c1..e21fdd10479 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -55,7 +55,6 @@ extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 
 extern void initmem_init(void);
-extern void free_initmem(void);
 
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5533b30cac0..a19542c1685 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -14,13 +14,13 @@ struct mm_struct;
 #include <asm/sigcontext.h>
 #include <asm/current.h>
 #include <asm/cpufeature.h>
-#include <asm/system.h>
 #include <asm/page.h>
 #include <asm/pgtable_types.h>
 #include <asm/percpu.h>
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+#include <asm/special_insns.h>
 
 #include <linux/personality.h>
 #include <linux/cpumask.h>
@@ -29,6 +29,15 @@ struct mm_struct;
 #include <linux/math64.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/irqflags.h>
+
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be quite expensive on some Nehalem processors.
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN	0
 
 #define HBP_NUM 4
 /*
@@ -959,4 +968,24 @@ extern bool cpu_has_amd_erratum(const int *);
 #define cpu_has_amd_erratum(x)	(false)
 #endif /* CONFIG_CPU_SUP_AMD */
 
+#ifdef CONFIG_X86_32
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+#endif
+
+void disable_hlt(void);
+void enable_hlt(void);
+
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
+bool set_pm_idle_to_default(void);
+
+void stop_this_cpu(void *dummy);
+
 #endif /* _ASM_X86_PROCESSOR_H */
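The declarations that used to sit at the bottom of asm/system.h (disable_hlt(), enable_hlt(), arch_align_stack() and friends) are now picked up via asm/processor.h. A minimal sketch of the disable_hlt()/enable_hlt() bracket, with a hypothetical do_legacy_io() standing in for a real timing-critical driver path:

#include <asm/processor.h>		/* disable_hlt(), enable_hlt() after this split */

static void do_timing_critical_io(void)	/* hypothetical driver helper */
{
	disable_hlt();			/* keep the idle loop out of HLT while the I/O runs */
	do_legacy_io();			/* assumed stand-in for the actual device access */
	enable_hlt();			/* allow HLT in the idle loop again */
}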
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 5e641715c3f..165466233ab 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -212,7 +212,61 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
-#endif
-#endif
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg, value)						\
+do {									\
+	unsigned short __val = (value);					\
+									\
+	asm volatile("						\n"	\
+		     "1:	movl %k0,%%" #seg "		\n"	\
+									\
+		     ".section .fixup,\"ax\"			\n"	\
+		     "2:	xorl %k0,%k0			\n"	\
+		     "		jmp 1b				\n"	\
+		     ".previous					\n"	\
+									\
+		     _ASM_EXTABLE(1b, 2b)				\
+									\
+		     : "+r" (__val) : : "memory");			\
+} while (0)
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value)				\
+	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
+
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk)	((tsk)->thread.gs)
+#define lazy_save_gs(v)		savesegment(gs, (v))
+#define lazy_load_gs(v)		loadsegment(gs, (v))
+#else	/* X86_32_LAZY_GS */
+#define get_user_gs(regs)	(u16)((regs)->gs)
+#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v)		do { } while (0)
+#define lazy_load_gs(v)		do { } while (0)
+#endif	/* X86_32_LAZY_GS */
+#endif	/* X86_32 */
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+	return __limit + 1;
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SEGMENT_H */
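loadsegment()/savesegment() move here from asm/system.h unchanged. A small illustrative sketch of how callers typically use the pair (hypothetical snippet, not taken from this patch):

#include <asm/segment.h>

static unsigned short saved_fs;		/* illustrative storage only */

static void save_and_clear_fs(void)
{
	savesegment(fs, saved_fs);	/* read the current %fs selector */
	loadsegment(fs, 0);		/* load the NULL selector; a fault falls back to zero */
}

static void restore_fs(void)
{
	loadsegment(fs, saved_fs);	/* put the original selector back */
}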
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
new file mode 100644
index 00000000000..41fc93a2e22
--- /dev/null
+++ b/arch/x86/include/asm/special_insns.h
@@ -0,0 +1,199 @@
1 | #ifndef _ASM_X86_SPECIAL_INSNS_H | ||
2 | #define _ASM_X86_SPECIAL_INSNS_H | ||
3 | |||
4 | |||
5 | #ifdef __KERNEL__ | ||
6 | |||
7 | static inline void native_clts(void) | ||
8 | { | ||
9 | asm volatile("clts"); | ||
10 | } | ||
11 | |||
12 | /* | ||
13 | * Volatile isn't enough to prevent the compiler from reordering the | ||
14 | * read/write functions for the control registers and messing everything up. | ||
15 | * A memory clobber would solve the problem, but would prevent reordering of | ||
16 | * all loads stores around it, which can hurt performance. Solution is to | ||
17 | * use a variable and mimic reads and writes to it to enforce serialization | ||
18 | */ | ||
19 | static unsigned long __force_order; | ||
20 | |||
21 | static inline unsigned long native_read_cr0(void) | ||
22 | { | ||
23 | unsigned long val; | ||
24 | asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); | ||
25 | return val; | ||
26 | } | ||
27 | |||
28 | static inline void native_write_cr0(unsigned long val) | ||
29 | { | ||
30 | asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); | ||
31 | } | ||
32 | |||
33 | static inline unsigned long native_read_cr2(void) | ||
34 | { | ||
35 | unsigned long val; | ||
36 | asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); | ||
37 | return val; | ||
38 | } | ||
39 | |||
40 | static inline void native_write_cr2(unsigned long val) | ||
41 | { | ||
42 | asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); | ||
43 | } | ||
44 | |||
45 | static inline unsigned long native_read_cr3(void) | ||
46 | { | ||
47 | unsigned long val; | ||
48 | asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); | ||
49 | return val; | ||
50 | } | ||
51 | |||
52 | static inline void native_write_cr3(unsigned long val) | ||
53 | { | ||
54 | asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); | ||
55 | } | ||
56 | |||
57 | static inline unsigned long native_read_cr4(void) | ||
58 | { | ||
59 | unsigned long val; | ||
60 | asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); | ||
61 | return val; | ||
62 | } | ||
63 | |||
64 | static inline unsigned long native_read_cr4_safe(void) | ||
65 | { | ||
66 | unsigned long val; | ||
67 | /* This could fault if %cr4 does not exist. In x86_64, a cr4 always | ||
68 | * exists, so it will never fail. */ | ||
69 | #ifdef CONFIG_X86_32 | ||
70 | asm volatile("1: mov %%cr4, %0\n" | ||
71 | "2:\n" | ||
72 | _ASM_EXTABLE(1b, 2b) | ||
73 | : "=r" (val), "=m" (__force_order) : "0" (0)); | ||
74 | #else | ||
75 | val = native_read_cr4(); | ||
76 | #endif | ||
77 | return val; | ||
78 | } | ||
79 | |||
80 | static inline void native_write_cr4(unsigned long val) | ||
81 | { | ||
82 | asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); | ||
83 | } | ||
84 | |||
85 | #ifdef CONFIG_X86_64 | ||
86 | static inline unsigned long native_read_cr8(void) | ||
87 | { | ||
88 | unsigned long cr8; | ||
89 | asm volatile("movq %%cr8,%0" : "=r" (cr8)); | ||
90 | return cr8; | ||
91 | } | ||
92 | |||
93 | static inline void native_write_cr8(unsigned long val) | ||
94 | { | ||
95 | asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); | ||
96 | } | ||
97 | #endif | ||
98 | |||
99 | static inline void native_wbinvd(void) | ||
100 | { | ||
101 | asm volatile("wbinvd": : :"memory"); | ||
102 | } | ||
103 | |||
104 | extern void native_load_gs_index(unsigned); | ||
105 | |||
106 | #ifdef CONFIG_PARAVIRT | ||
107 | #include <asm/paravirt.h> | ||
108 | #else | ||
109 | |||
110 | static inline unsigned long read_cr0(void) | ||
111 | { | ||
112 | return native_read_cr0(); | ||
113 | } | ||
114 | |||
115 | static inline void write_cr0(unsigned long x) | ||
116 | { | ||
117 | native_write_cr0(x); | ||
118 | } | ||
119 | |||
120 | static inline unsigned long read_cr2(void) | ||
121 | { | ||
122 | return native_read_cr2(); | ||
123 | } | ||
124 | |||
125 | static inline void write_cr2(unsigned long x) | ||
126 | { | ||
127 | native_write_cr2(x); | ||
128 | } | ||
129 | |||
130 | static inline unsigned long read_cr3(void) | ||
131 | { | ||
132 | return native_read_cr3(); | ||
133 | } | ||
134 | |||
135 | static inline void write_cr3(unsigned long x) | ||
136 | { | ||
137 | native_write_cr3(x); | ||
138 | } | ||
139 | |||
140 | static inline unsigned long read_cr4(void) | ||
141 | { | ||
142 | return native_read_cr4(); | ||
143 | } | ||
144 | |||
145 | static inline unsigned long read_cr4_safe(void) | ||
146 | { | ||
147 | return native_read_cr4_safe(); | ||
148 | } | ||
149 | |||
150 | static inline void write_cr4(unsigned long x) | ||
151 | { | ||
152 | native_write_cr4(x); | ||
153 | } | ||
154 | |||
155 | static inline void wbinvd(void) | ||
156 | { | ||
157 | native_wbinvd(); | ||
158 | } | ||
159 | |||
160 | #ifdef CONFIG_X86_64 | ||
161 | |||
162 | static inline unsigned long read_cr8(void) | ||
163 | { | ||
164 | return native_read_cr8(); | ||
165 | } | ||
166 | |||
167 | static inline void write_cr8(unsigned long x) | ||
168 | { | ||
169 | native_write_cr8(x); | ||
170 | } | ||
171 | |||
172 | static inline void load_gs_index(unsigned selector) | ||
173 | { | ||
174 | native_load_gs_index(selector); | ||
175 | } | ||
176 | |||
177 | #endif | ||
178 | |||
179 | /* Clear the 'TS' bit */ | ||
180 | static inline void clts(void) | ||
181 | { | ||
182 | native_clts(); | ||
183 | } | ||
184 | |||
185 | #endif/* CONFIG_PARAVIRT */ | ||
186 | |||
187 | #define stts() write_cr0(read_cr0() | X86_CR0_TS) | ||
188 | |||
189 | static inline void clflush(volatile void *__p) | ||
190 | { | ||
191 | asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); | ||
192 | } | ||
193 | |||
194 | #define nop() asm volatile ("nop") | ||
195 | |||
196 | |||
197 | #endif /* __KERNEL__ */ | ||
198 | |||
199 | #endif /* _ASM_X86_SPECIAL_INSNS_H */ | ||
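clts()/stts() and the control-register accessors now live in asm/special_insns.h rather than asm/system.h. A minimal sketch of the classic lazy-FPU pattern built on them (illustrative only; the real FPU code carries much more state handling):

#include <asm/special_insns.h>

static void begin_fpu_use(void)
{
	clts();		/* clear CR0.TS so FPU instructions do not fault */
}

static void end_fpu_use(void)
{
	stts();		/* set CR0.TS again: the next FPU use traps for lazy restore */
}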
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 15751776356..b5d9533d2c3 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -38,7 +38,6 @@
 #include <asm/tsc.h>
 #include <asm/processor.h>
 #include <asm/percpu.h>
-#include <asm/system.h>
 #include <asm/desc.h>
 #include <linux/random.h>
 
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
new file mode 100644
index 00000000000..4ec45b3abba
--- /dev/null
+++ b/arch/x86/include/asm/switch_to.h
@@ -0,0 +1,129 @@
1 | #ifndef _ASM_X86_SWITCH_TO_H | ||
2 | #define _ASM_X86_SWITCH_TO_H | ||
3 | |||
4 | struct task_struct; /* one of the stranger aspects of C forward declarations */ | ||
5 | struct task_struct *__switch_to(struct task_struct *prev, | ||
6 | struct task_struct *next); | ||
7 | struct tss_struct; | ||
8 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | ||
9 | struct tss_struct *tss); | ||
10 | |||
11 | #ifdef CONFIG_X86_32 | ||
12 | |||
13 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
14 | #define __switch_canary \ | ||
15 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ | ||
16 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" | ||
17 | #define __switch_canary_oparam \ | ||
18 | , [stack_canary] "=m" (stack_canary.canary) | ||
19 | #define __switch_canary_iparam \ | ||
20 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | ||
21 | #else /* CC_STACKPROTECTOR */ | ||
22 | #define __switch_canary | ||
23 | #define __switch_canary_oparam | ||
24 | #define __switch_canary_iparam | ||
25 | #endif /* CC_STACKPROTECTOR */ | ||
26 | |||
27 | /* | ||
28 | * Saving eflags is important. It switches not only IOPL between tasks, | ||
29 | * it also protects other tasks from NT leaking through sysenter etc. | ||
30 | */ | ||
31 | #define switch_to(prev, next, last) \ | ||
32 | do { \ | ||
33 | /* \ | ||
34 | * Context-switching clobbers all registers, so we clobber \ | ||
35 | * them explicitly, via unused output variables. \ | ||
36 | * (EAX and EBP is not listed because EBP is saved/restored \ | ||
37 | * explicitly for wchan access and EAX is the return value of \ | ||
38 | * __switch_to()) \ | ||
39 | */ \ | ||
40 | unsigned long ebx, ecx, edx, esi, edi; \ | ||
41 | \ | ||
42 | asm volatile("pushfl\n\t" /* save flags */ \ | ||
43 | "pushl %%ebp\n\t" /* save EBP */ \ | ||
44 | "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ | ||
45 | "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ | ||
46 | "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ | ||
47 | "pushl %[next_ip]\n\t" /* restore EIP */ \ | ||
48 | __switch_canary \ | ||
49 | "jmp __switch_to\n" /* regparm call */ \ | ||
50 | "1:\t" \ | ||
51 | "popl %%ebp\n\t" /* restore EBP */ \ | ||
52 | "popfl\n" /* restore flags */ \ | ||
53 | \ | ||
54 | /* output parameters */ \ | ||
55 | : [prev_sp] "=m" (prev->thread.sp), \ | ||
56 | [prev_ip] "=m" (prev->thread.ip), \ | ||
57 | "=a" (last), \ | ||
58 | \ | ||
59 | /* clobbered output registers: */ \ | ||
60 | "=b" (ebx), "=c" (ecx), "=d" (edx), \ | ||
61 | "=S" (esi), "=D" (edi) \ | ||
62 | \ | ||
63 | __switch_canary_oparam \ | ||
64 | \ | ||
65 | /* input parameters: */ \ | ||
66 | : [next_sp] "m" (next->thread.sp), \ | ||
67 | [next_ip] "m" (next->thread.ip), \ | ||
68 | \ | ||
69 | /* regparm parameters for __switch_to(): */ \ | ||
70 | [prev] "a" (prev), \ | ||
71 | [next] "d" (next) \ | ||
72 | \ | ||
73 | __switch_canary_iparam \ | ||
74 | \ | ||
75 | : /* reloaded segment registers */ \ | ||
76 | "memory"); \ | ||
77 | } while (0) | ||
78 | |||
79 | #else /* CONFIG_X86_32 */ | ||
80 | |||
81 | /* frame pointer must be last for get_wchan */ | ||
82 | #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" | ||
83 | #define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" | ||
84 | |||
85 | #define __EXTRA_CLOBBER \ | ||
86 | , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ | ||
87 | "r12", "r13", "r14", "r15" | ||
88 | |||
89 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
90 | #define __switch_canary \ | ||
91 | "movq %P[task_canary](%%rsi),%%r8\n\t" \ | ||
92 | "movq %%r8,"__percpu_arg([gs_canary])"\n\t" | ||
93 | #define __switch_canary_oparam \ | ||
94 | , [gs_canary] "=m" (irq_stack_union.stack_canary) | ||
95 | #define __switch_canary_iparam \ | ||
96 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | ||
97 | #else /* CC_STACKPROTECTOR */ | ||
98 | #define __switch_canary | ||
99 | #define __switch_canary_oparam | ||
100 | #define __switch_canary_iparam | ||
101 | #endif /* CC_STACKPROTECTOR */ | ||
102 | |||
103 | /* Save restore flags to clear handle leaking NT */ | ||
104 | #define switch_to(prev, next, last) \ | ||
105 | asm volatile(SAVE_CONTEXT \ | ||
106 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ | ||
107 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ | ||
108 | "call __switch_to\n\t" \ | ||
109 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ | ||
110 | __switch_canary \ | ||
111 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ | ||
112 | "movq %%rax,%%rdi\n\t" \ | ||
113 | "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ | ||
114 | "jnz ret_from_fork\n\t" \ | ||
115 | RESTORE_CONTEXT \ | ||
116 | : "=a" (last) \ | ||
117 | __switch_canary_oparam \ | ||
118 | : [next] "S" (next), [prev] "D" (prev), \ | ||
119 | [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ | ||
120 | [ti_flags] "i" (offsetof(struct thread_info, flags)), \ | ||
121 | [_tif_fork] "i" (_TIF_FORK), \ | ||
122 | [thread_info] "i" (offsetof(struct task_struct, stack)), \ | ||
123 | [current_task] "m" (current_task) \ | ||
124 | __switch_canary_iparam \ | ||
125 | : "memory", "cc" __EXTRA_CLOBBER) | ||
126 | |||
127 | #endif /* CONFIG_X86_32 */ | ||
128 | |||
129 | #endif /* _ASM_X86_SWITCH_TO_H */ | ||
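The consumer of this macro is the scheduler's context_switch() path. A stripped-down sketch of the call site, paraphrased from kernel/sched rather than taken from this patch (do_context_switch() is a hypothetical wrapper name):

#include <asm/switch_to.h>

static void do_context_switch(struct task_struct *prev, struct task_struct *next)
{
	/*
	 * switch_to() switches stacks and registers over to 'next'; when
	 * this task is scheduled back in, the third argument holds the
	 * task we just switched away from.
	 */
	switch_to(prev, next, prev);
	barrier();	/* keep the compiler from caching state across the switch */
}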
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
deleted file mode 100644
index 2d2f01ce6dc..00000000000
--- a/arch/x86/include/asm/system.h
+++ /dev/null
@@ -1,523 +0,0 @@
1 | #ifndef _ASM_X86_SYSTEM_H | ||
2 | #define _ASM_X86_SYSTEM_H | ||
3 | |||
4 | #include <asm/asm.h> | ||
5 | #include <asm/segment.h> | ||
6 | #include <asm/cpufeature.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | #include <asm/nops.h> | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/irqflags.h> | ||
12 | |||
13 | /* entries in ARCH_DLINFO: */ | ||
14 | #if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) | ||
15 | # define AT_VECTOR_SIZE_ARCH 2 | ||
16 | #else /* else it's non-compat x86-64 */ | ||
17 | # define AT_VECTOR_SIZE_ARCH 1 | ||
18 | #endif | ||
19 | |||
20 | struct task_struct; /* one of the stranger aspects of C forward declarations */ | ||
21 | struct task_struct *__switch_to(struct task_struct *prev, | ||
22 | struct task_struct *next); | ||
23 | struct tss_struct; | ||
24 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | ||
25 | struct tss_struct *tss); | ||
26 | extern void show_regs_common(void); | ||
27 | |||
28 | #ifdef CONFIG_X86_32 | ||
29 | |||
30 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
31 | #define __switch_canary \ | ||
32 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ | ||
33 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" | ||
34 | #define __switch_canary_oparam \ | ||
35 | , [stack_canary] "=m" (stack_canary.canary) | ||
36 | #define __switch_canary_iparam \ | ||
37 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | ||
38 | #else /* CC_STACKPROTECTOR */ | ||
39 | #define __switch_canary | ||
40 | #define __switch_canary_oparam | ||
41 | #define __switch_canary_iparam | ||
42 | #endif /* CC_STACKPROTECTOR */ | ||
43 | |||
44 | /* | ||
45 | * Saving eflags is important. It switches not only IOPL between tasks, | ||
46 | * it also protects other tasks from NT leaking through sysenter etc. | ||
47 | */ | ||
48 | #define switch_to(prev, next, last) \ | ||
49 | do { \ | ||
50 | /* \ | ||
51 | * Context-switching clobbers all registers, so we clobber \ | ||
52 | * them explicitly, via unused output variables. \ | ||
53 | * (EAX and EBP is not listed because EBP is saved/restored \ | ||
54 | * explicitly for wchan access and EAX is the return value of \ | ||
55 | * __switch_to()) \ | ||
56 | */ \ | ||
57 | unsigned long ebx, ecx, edx, esi, edi; \ | ||
58 | \ | ||
59 | asm volatile("pushfl\n\t" /* save flags */ \ | ||
60 | "pushl %%ebp\n\t" /* save EBP */ \ | ||
61 | "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ | ||
62 | "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ | ||
63 | "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ | ||
64 | "pushl %[next_ip]\n\t" /* restore EIP */ \ | ||
65 | __switch_canary \ | ||
66 | "jmp __switch_to\n" /* regparm call */ \ | ||
67 | "1:\t" \ | ||
68 | "popl %%ebp\n\t" /* restore EBP */ \ | ||
69 | "popfl\n" /* restore flags */ \ | ||
70 | \ | ||
71 | /* output parameters */ \ | ||
72 | : [prev_sp] "=m" (prev->thread.sp), \ | ||
73 | [prev_ip] "=m" (prev->thread.ip), \ | ||
74 | "=a" (last), \ | ||
75 | \ | ||
76 | /* clobbered output registers: */ \ | ||
77 | "=b" (ebx), "=c" (ecx), "=d" (edx), \ | ||
78 | "=S" (esi), "=D" (edi) \ | ||
79 | \ | ||
80 | __switch_canary_oparam \ | ||
81 | \ | ||
82 | /* input parameters: */ \ | ||
83 | : [next_sp] "m" (next->thread.sp), \ | ||
84 | [next_ip] "m" (next->thread.ip), \ | ||
85 | \ | ||
86 | /* regparm parameters for __switch_to(): */ \ | ||
87 | [prev] "a" (prev), \ | ||
88 | [next] "d" (next) \ | ||
89 | \ | ||
90 | __switch_canary_iparam \ | ||
91 | \ | ||
92 | : /* reloaded segment registers */ \ | ||
93 | "memory"); \ | ||
94 | } while (0) | ||
95 | |||
96 | /* | ||
97 | * disable hlt during certain critical i/o operations | ||
98 | */ | ||
99 | #define HAVE_DISABLE_HLT | ||
100 | #else | ||
101 | |||
102 | /* frame pointer must be last for get_wchan */ | ||
103 | #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" | ||
104 | #define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" | ||
105 | |||
106 | #define __EXTRA_CLOBBER \ | ||
107 | , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ | ||
108 | "r12", "r13", "r14", "r15" | ||
109 | |||
110 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
111 | #define __switch_canary \ | ||
112 | "movq %P[task_canary](%%rsi),%%r8\n\t" \ | ||
113 | "movq %%r8,"__percpu_arg([gs_canary])"\n\t" | ||
114 | #define __switch_canary_oparam \ | ||
115 | , [gs_canary] "=m" (irq_stack_union.stack_canary) | ||
116 | #define __switch_canary_iparam \ | ||
117 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | ||
118 | #else /* CC_STACKPROTECTOR */ | ||
119 | #define __switch_canary | ||
120 | #define __switch_canary_oparam | ||
121 | #define __switch_canary_iparam | ||
122 | #endif /* CC_STACKPROTECTOR */ | ||
123 | |||
124 | /* Save restore flags to clear handle leaking NT */ | ||
125 | #define switch_to(prev, next, last) \ | ||
126 | asm volatile(SAVE_CONTEXT \ | ||
127 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ | ||
128 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ | ||
129 | "call __switch_to\n\t" \ | ||
130 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ | ||
131 | __switch_canary \ | ||
132 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ | ||
133 | "movq %%rax,%%rdi\n\t" \ | ||
134 | "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ | ||
135 | "jnz ret_from_fork\n\t" \ | ||
136 | RESTORE_CONTEXT \ | ||
137 | : "=a" (last) \ | ||
138 | __switch_canary_oparam \ | ||
139 | : [next] "S" (next), [prev] "D" (prev), \ | ||
140 | [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ | ||
141 | [ti_flags] "i" (offsetof(struct thread_info, flags)), \ | ||
142 | [_tif_fork] "i" (_TIF_FORK), \ | ||
143 | [thread_info] "i" (offsetof(struct task_struct, stack)), \ | ||
144 | [current_task] "m" (current_task) \ | ||
145 | __switch_canary_iparam \ | ||
146 | : "memory", "cc" __EXTRA_CLOBBER) | ||
147 | #endif | ||
148 | |||
149 | #ifdef __KERNEL__ | ||
150 | |||
151 | extern void native_load_gs_index(unsigned); | ||
152 | |||
153 | /* | ||
154 | * Load a segment. Fall back on loading the zero | ||
155 | * segment if something goes wrong.. | ||
156 | */ | ||
157 | #define loadsegment(seg, value) \ | ||
158 | do { \ | ||
159 | unsigned short __val = (value); \ | ||
160 | \ | ||
161 | asm volatile(" \n" \ | ||
162 | "1: movl %k0,%%" #seg " \n" \ | ||
163 | \ | ||
164 | ".section .fixup,\"ax\" \n" \ | ||
165 | "2: xorl %k0,%k0 \n" \ | ||
166 | " jmp 1b \n" \ | ||
167 | ".previous \n" \ | ||
168 | \ | ||
169 | _ASM_EXTABLE(1b, 2b) \ | ||
170 | \ | ||
171 | : "+r" (__val) : : "memory"); \ | ||
172 | } while (0) | ||
173 | |||
174 | /* | ||
175 | * Save a segment register away | ||
176 | */ | ||
177 | #define savesegment(seg, value) \ | ||
178 | asm("mov %%" #seg ",%0":"=r" (value) : : "memory") | ||
179 | |||
180 | /* | ||
181 | * x86_32 user gs accessors. | ||
182 | */ | ||
183 | #ifdef CONFIG_X86_32 | ||
184 | #ifdef CONFIG_X86_32_LAZY_GS | ||
185 | #define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) | ||
186 | #define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) | ||
187 | #define task_user_gs(tsk) ((tsk)->thread.gs) | ||
188 | #define lazy_save_gs(v) savesegment(gs, (v)) | ||
189 | #define lazy_load_gs(v) loadsegment(gs, (v)) | ||
190 | #else /* X86_32_LAZY_GS */ | ||
191 | #define get_user_gs(regs) (u16)((regs)->gs) | ||
192 | #define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) | ||
193 | #define task_user_gs(tsk) (task_pt_regs(tsk)->gs) | ||
194 | #define lazy_save_gs(v) do { } while (0) | ||
195 | #define lazy_load_gs(v) do { } while (0) | ||
196 | #endif /* X86_32_LAZY_GS */ | ||
197 | #endif /* X86_32 */ | ||
198 | |||
199 | static inline unsigned long get_limit(unsigned long segment) | ||
200 | { | ||
201 | unsigned long __limit; | ||
202 | asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); | ||
203 | return __limit + 1; | ||
204 | } | ||
205 | |||
206 | static inline void native_clts(void) | ||
207 | { | ||
208 | asm volatile("clts"); | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * Volatile isn't enough to prevent the compiler from reordering the | ||
213 | * read/write functions for the control registers and messing everything up. | ||
214 | * A memory clobber would solve the problem, but would prevent reordering of | ||
215 | * all loads stores around it, which can hurt performance. Solution is to | ||
216 | * use a variable and mimic reads and writes to it to enforce serialization | ||
217 | */ | ||
218 | static unsigned long __force_order; | ||
219 | |||
220 | static inline unsigned long native_read_cr0(void) | ||
221 | { | ||
222 | unsigned long val; | ||
223 | asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); | ||
224 | return val; | ||
225 | } | ||
226 | |||
227 | static inline void native_write_cr0(unsigned long val) | ||
228 | { | ||
229 | asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); | ||
230 | } | ||
231 | |||
232 | static inline unsigned long native_read_cr2(void) | ||
233 | { | ||
234 | unsigned long val; | ||
235 | asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); | ||
236 | return val; | ||
237 | } | ||
238 | |||
239 | static inline void native_write_cr2(unsigned long val) | ||
240 | { | ||
241 | asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); | ||
242 | } | ||
243 | |||
244 | static inline unsigned long native_read_cr3(void) | ||
245 | { | ||
246 | unsigned long val; | ||
247 | asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); | ||
248 | return val; | ||
249 | } | ||
250 | |||
251 | static inline void native_write_cr3(unsigned long val) | ||
252 | { | ||
253 | asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); | ||
254 | } | ||
255 | |||
256 | static inline unsigned long native_read_cr4(void) | ||
257 | { | ||
258 | unsigned long val; | ||
259 | asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); | ||
260 | return val; | ||
261 | } | ||
262 | |||
263 | static inline unsigned long native_read_cr4_safe(void) | ||
264 | { | ||
265 | unsigned long val; | ||
266 | /* This could fault if %cr4 does not exist. In x86_64, a cr4 always | ||
267 | * exists, so it will never fail. */ | ||
268 | #ifdef CONFIG_X86_32 | ||
269 | asm volatile("1: mov %%cr4, %0\n" | ||
270 | "2:\n" | ||
271 | _ASM_EXTABLE(1b, 2b) | ||
272 | : "=r" (val), "=m" (__force_order) : "0" (0)); | ||
273 | #else | ||
274 | val = native_read_cr4(); | ||
275 | #endif | ||
276 | return val; | ||
277 | } | ||
278 | |||
279 | static inline void native_write_cr4(unsigned long val) | ||
280 | { | ||
281 | asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); | ||
282 | } | ||
283 | |||
284 | #ifdef CONFIG_X86_64 | ||
285 | static inline unsigned long native_read_cr8(void) | ||
286 | { | ||
287 | unsigned long cr8; | ||
288 | asm volatile("movq %%cr8,%0" : "=r" (cr8)); | ||
289 | return cr8; | ||
290 | } | ||
291 | |||
292 | static inline void native_write_cr8(unsigned long val) | ||
293 | { | ||
294 | asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); | ||
295 | } | ||
296 | #endif | ||
297 | |||
298 | static inline void native_wbinvd(void) | ||
299 | { | ||
300 | asm volatile("wbinvd": : :"memory"); | ||
301 | } | ||
302 | |||
303 | #ifdef CONFIG_PARAVIRT | ||
304 | #include <asm/paravirt.h> | ||
305 | #else | ||
306 | |||
307 | static inline unsigned long read_cr0(void) | ||
308 | { | ||
309 | return native_read_cr0(); | ||
310 | } | ||
311 | |||
312 | static inline void write_cr0(unsigned long x) | ||
313 | { | ||
314 | native_write_cr0(x); | ||
315 | } | ||
316 | |||
317 | static inline unsigned long read_cr2(void) | ||
318 | { | ||
319 | return native_read_cr2(); | ||
320 | } | ||
321 | |||
322 | static inline void write_cr2(unsigned long x) | ||
323 | { | ||
324 | native_write_cr2(x); | ||
325 | } | ||
326 | |||
327 | static inline unsigned long read_cr3(void) | ||
328 | { | ||
329 | return native_read_cr3(); | ||
330 | } | ||
331 | |||
332 | static inline void write_cr3(unsigned long x) | ||
333 | { | ||
334 | native_write_cr3(x); | ||
335 | } | ||
336 | |||
337 | static inline unsigned long read_cr4(void) | ||
338 | { | ||
339 | return native_read_cr4(); | ||
340 | } | ||
341 | |||
342 | static inline unsigned long read_cr4_safe(void) | ||
343 | { | ||
344 | return native_read_cr4_safe(); | ||
345 | } | ||
346 | |||
347 | static inline void write_cr4(unsigned long x) | ||
348 | { | ||
349 | native_write_cr4(x); | ||
350 | } | ||
351 | |||
352 | static inline void wbinvd(void) | ||
353 | { | ||
354 | native_wbinvd(); | ||
355 | } | ||
356 | |||
357 | #ifdef CONFIG_X86_64 | ||
358 | |||
359 | static inline unsigned long read_cr8(void) | ||
360 | { | ||
361 | return native_read_cr8(); | ||
362 | } | ||
363 | |||
364 | static inline void write_cr8(unsigned long x) | ||
365 | { | ||
366 | native_write_cr8(x); | ||
367 | } | ||
368 | |||
369 | static inline void load_gs_index(unsigned selector) | ||
370 | { | ||
371 | native_load_gs_index(selector); | ||
372 | } | ||
373 | |||
374 | #endif | ||
375 | |||
376 | /* Clear the 'TS' bit */ | ||
377 | static inline void clts(void) | ||
378 | { | ||
379 | native_clts(); | ||
380 | } | ||
381 | |||
382 | #endif/* CONFIG_PARAVIRT */ | ||
383 | |||
384 | #define stts() write_cr0(read_cr0() | X86_CR0_TS) | ||
385 | |||
386 | #endif /* __KERNEL__ */ | ||
387 | |||
388 | static inline void clflush(volatile void *__p) | ||
389 | { | ||
390 | asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); | ||
391 | } | ||
392 | |||
393 | #define nop() asm volatile ("nop") | ||
394 | |||
395 | void disable_hlt(void); | ||
396 | void enable_hlt(void); | ||
397 | |||
398 | void cpu_idle_wait(void); | ||
399 | |||
400 | extern unsigned long arch_align_stack(unsigned long sp); | ||
401 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); | ||
402 | |||
403 | void default_idle(void); | ||
404 | bool set_pm_idle_to_default(void); | ||
405 | |||
406 | void stop_this_cpu(void *dummy); | ||
407 | |||
408 | /* | ||
409 | * Force strict CPU ordering. | ||
410 | * And yes, this is required on UP too when we're talking | ||
411 | * to devices. | ||
412 | */ | ||
413 | #ifdef CONFIG_X86_32 | ||
414 | /* | ||
415 | * Some non-Intel clones support out of order store. wmb() ceases to be a | ||
416 | * nop for these. | ||
417 | */ | ||
418 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
419 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
420 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
421 | #else | ||
422 | #define mb() asm volatile("mfence":::"memory") | ||
423 | #define rmb() asm volatile("lfence":::"memory") | ||
424 | #define wmb() asm volatile("sfence" ::: "memory") | ||
425 | #endif | ||
426 | |||
427 | /** | ||
428 | * read_barrier_depends - Flush all pending reads that subsequents reads | ||
429 | * depend on. | ||
430 | * | ||
431 | * No data-dependent reads from memory-like regions are ever reordered | ||
432 | * over this barrier. All reads preceding this primitive are guaranteed | ||
433 | * to access memory (but not necessarily other CPUs' caches) before any | ||
434 | * reads following this primitive that depend on the data return by | ||
435 | * any of the preceding reads. This primitive is much lighter weight than | ||
436 | * rmb() on most CPUs, and is never heavier weight than is | ||
437 | * rmb(). | ||
438 | * | ||
439 | * These ordering constraints are respected by both the local CPU | ||
440 | * and the compiler. | ||
441 | * | ||
442 | * Ordering is not guaranteed by anything other than these primitives, | ||
443 | * not even by data dependencies. See the documentation for | ||
444 | * memory_barrier() for examples and URLs to more information. | ||
445 | * | ||
446 | * For example, the following code would force ordering (the initial | ||
447 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
448 | * | ||
449 | * <programlisting> | ||
450 | * CPU 0 CPU 1 | ||
451 | * | ||
452 | * b = 2; | ||
453 | * memory_barrier(); | ||
454 | * p = &b; q = p; | ||
455 | * read_barrier_depends(); | ||
456 | * d = *q; | ||
457 | * </programlisting> | ||
458 | * | ||
459 | * because the read of "*q" depends on the read of "p" and these | ||
460 | * two reads are separated by a read_barrier_depends(). However, | ||
461 | * the following code, with the same initial values for "a" and "b": | ||
462 | * | ||
463 | * <programlisting> | ||
464 | * CPU 0 CPU 1 | ||
465 | * | ||
466 | * a = 2; | ||
467 | * memory_barrier(); | ||
468 | * b = 3; y = b; | ||
469 | * read_barrier_depends(); | ||
470 | * x = a; | ||
471 | * </programlisting> | ||
472 | * | ||
473 | * does not enforce ordering, since there is no data dependency between | ||
474 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
475 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
476 | * in cases like this where there are no data dependencies. | ||
477 | **/ | ||
478 | |||
479 | #define read_barrier_depends() do { } while (0) | ||
480 | |||
481 | #ifdef CONFIG_SMP | ||
482 | #define smp_mb() mb() | ||
483 | #ifdef CONFIG_X86_PPRO_FENCE | ||
484 | # define smp_rmb() rmb() | ||
485 | #else | ||
486 | # define smp_rmb() barrier() | ||
487 | #endif | ||
488 | #ifdef CONFIG_X86_OOSTORE | ||
489 | # define smp_wmb() wmb() | ||
490 | #else | ||
491 | # define smp_wmb() barrier() | ||
492 | #endif | ||
493 | #define smp_read_barrier_depends() read_barrier_depends() | ||
494 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | ||
495 | #else | ||
496 | #define smp_mb() barrier() | ||
497 | #define smp_rmb() barrier() | ||
498 | #define smp_wmb() barrier() | ||
499 | #define smp_read_barrier_depends() do { } while (0) | ||
500 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
501 | #endif | ||
502 | |||
503 | /* | ||
504 | * Stop RDTSC speculation. This is needed when you need to use RDTSC | ||
505 | * (or get_cycles or vread that possibly accesses the TSC) in a defined | ||
506 | * code region. | ||
507 | * | ||
508 | * (Could use an alternative three way for this if there was one.) | ||
509 | */ | ||
510 | static __always_inline void rdtsc_barrier(void) | ||
511 | { | ||
512 | alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); | ||
513 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | ||
514 | } | ||
515 | |||
516 | /* | ||
517 | * We handle most unaligned accesses in hardware. On the other hand | ||
518 | * unaligned DMA can be quite expensive on some Nehalem processors. | ||
519 | * | ||
520 | * Based on this we disable the IP header alignment in network drivers. | ||
521 | */ | ||
522 | #define NET_IP_ALIGN 0 | ||
523 | #endif /* _ASM_X86_SYSTEM_H */ | ||
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 169be8938b9..c0e108e0807 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,7 +5,7 @@
 #include <linux/sched.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/special_insns.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index e0f9aa16358..5da71c27cc5 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -16,7 +16,6 @@
 #define _ASM_X86_VIRTEX_H
 
 #include <asm/processor.h>
-#include <asm/system.h>
 
 #include <asm/vmx.h>
 #include <asm/svm.h>