author		Al Viro <viro@zeniv.linux.org.uk>	2008-08-18 03:25:24 -0400
committer	H. Peter Anvin <hpa@zytor.com>	2008-10-23 01:55:21 -0400
commit		6a0eec8224db1191876770f7cac31f2a2637a6f5 (patch)
tree		f47080809d4b67f709667f387eb1e4bd7841ce18 /arch/um
parent		32926b3be19fa4859c02f248a342f2d62822dcfe (diff)
x86, um: get rid of system.h -> system.h include
Long-term we want to split system.h and include the barriers part from the
underlying target; for now, copy that part to sysdep.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
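
For orientation, a minimal sketch (illustrative only, not part of the patch) of
the include chain this change sets up: UML's asm/system.h now pulls the copied
barrier definitions from the per-target sysdep header instead of including the
underlying arch's system.h wholesale.

	/* arch/um/include/asm/system.h, simplified for illustration */
	#include "sysdep/system.h"	/* barrier definitions copied from arch/x86 below */

	extern void *switch_to(void *prev, void *next, void *last);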
Diffstat (limited to 'arch/um')
-rw-r--r--	arch/um/Makefile	2
-rw-r--r--	arch/um/include/asm/system-i386.h	6
-rw-r--r--	arch/um/include/asm/system-ppc.h	12
-rw-r--r--	arch/um/include/asm/system-x86_64.h	23
-rw-r--r--	arch/um/include/asm/system.h (renamed from arch/um/include/asm/system-generic.h)	14
-rw-r--r--	arch/um/include/shared/sysdep-i386/system.h	132
-rw-r--r--	arch/um/include/shared/sysdep-x86_64/system.h	132
7 files changed, 266 insertions, 55 deletions
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 909d16b07a44..11cb7baa0fee 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -19,7 +19,7 @@ core-y += $(ARCH_DIR)/kernel/ \
 	$(ARCH_DIR)/os-$(OS)/
 
 # Have to precede the include because the included Makefiles reference them.
-SYMLINK_HEADERS := archparam.h system.h processor.h ptrace.h \
+SYMLINK_HEADERS := archparam.h processor.h ptrace.h \
 	module.h vm-flags.h elf.h
 SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),$(ARCH_DIR)/include/asm/$(header))
 
diff --git a/arch/um/include/asm/system-i386.h b/arch/um/include/asm/system-i386.h
deleted file mode 100644
index c436263e67ba..000000000000
--- a/arch/um/include/asm/system-i386.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_SYSTEM_I386_H
-#define __UM_SYSTEM_I386_H
-
-#include "asm/system-generic.h"
-
-#endif
diff --git a/arch/um/include/asm/system-ppc.h b/arch/um/include/asm/system-ppc.h
deleted file mode 100644
index 17cde6640bf5..000000000000
--- a/arch/um/include/asm/system-ppc.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __UM_SYSTEM_PPC_H
-#define __UM_SYSTEM_PPC_H
-
-#define _switch_to _ppc_switch_to
-
-#include "asm/arch/system.h"
-
-#undef _switch_to
-
-#include "asm/system-generic.h"
-
-#endif
diff --git a/arch/um/include/asm/system-x86_64.h b/arch/um/include/asm/system-x86_64.h
deleted file mode 100644
index e1b61b580734..000000000000
--- a/arch/um/include/asm/system-x86_64.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#ifndef __UM_SYSTEM_X86_64_H
-#define __UM_SYSTEM_X86_64_H
-
-#include "asm/system-generic.h"
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/include/asm/system-generic.h b/arch/um/include/asm/system.h
index 5bcfa35e7a22..753346e2cdfd 100644
--- a/arch/um/include/asm/system-generic.h
+++ b/arch/um/include/asm/system.h
@@ -1,19 +1,7 @@
 #ifndef __UM_SYSTEM_GENERIC_H
 #define __UM_SYSTEM_GENERIC_H
 
-#include "asm/arch/system.h"
-
-#undef switch_to
-#undef local_irq_save
-#undef local_irq_restore
-#undef local_irq_disable
-#undef local_irq_enable
-#undef local_save_flags
-#undef local_irq_restore
-#undef local_irq_enable
-#undef local_irq_disable
-#undef local_irq_save
-#undef irqs_disabled
+#include "sysdep/system.h"
 
 extern void *switch_to(void *prev, void *next, void *last);
 
diff --git a/arch/um/include/shared/sysdep-i386/system.h b/arch/um/include/shared/sysdep-i386/system.h
new file mode 100644
index 000000000000..d1b93c436200
--- /dev/null
+++ b/arch/um/include/shared/sysdep-i386/system.h
@@ -0,0 +1,132 @@
+#ifndef _ASM_X86_SYSTEM_H_
+#define _ASM_X86_SYSTEM_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/* entries in ARCH_DLINFO: */
+#ifdef CONFIG_IA32_EMULATION
+# define AT_VECTOR_SIZE_ARCH 2
+#else
+# define AT_VECTOR_SIZE_ARCH 1
+#endif
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+void default_idle(void);
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb()	asm volatile("mfence":::"memory")
+#define rmb()	asm volatile("lfence":::"memory")
+#define wmb()	asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb()	rmb()
+#else
+# define smp_rmb()	barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb()	wmb()
+#else
+# define smp_wmb()	barrier()
+#endif
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
diff --git a/arch/um/include/shared/sysdep-x86_64/system.h b/arch/um/include/shared/sysdep-x86_64/system.h
new file mode 100644
index 000000000000..d1b93c436200
--- /dev/null
+++ b/arch/um/include/shared/sysdep-x86_64/system.h
@@ -0,0 +1,132 @@
+#ifndef _ASM_X86_SYSTEM_H_
+#define _ASM_X86_SYSTEM_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/* entries in ARCH_DLINFO: */
+#ifdef CONFIG_IA32_EMULATION
+# define AT_VECTOR_SIZE_ARCH 2
+#else
+# define AT_VECTOR_SIZE_ARCH 1
+#endif
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+void default_idle(void);
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb()	asm volatile("mfence":::"memory")
+#define rmb()	asm volatile("lfence":::"memory")
+#define wmb()	asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb()	rmb()
+#else
+# define smp_rmb()	barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb()	wmb()
+#else
+# define smp_wmb()	barrier()
+#endif
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
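
As a usage note (illustrative only, not part of the patch; the structure and
function names below are hypothetical), the pointer-publication pattern that the
read_barrier_depends() comment above describes looks roughly like this with the
SMP barriers defined in this header:

	/* Producer: initialise the payload, then publish the pointer.
	 * smp_wmb() keeps the payload store ahead of the publishing store. */
	struct msg {
		int payload;
	};

	static struct msg *shared_msg;	/* read concurrently by another CPU */

	static void publish(struct msg *m)
	{
		m->payload = 42;
		smp_wmb();		/* order payload before the pointer store */
		shared_msg = m;		/* publish */
	}

	/* Consumer: read the pointer, then the payload it points to.
	 * smp_read_barrier_depends() pairs with the smp_wmb() above; on x86 it
	 * compiles away, but the data-dependency ordering it documents is what
	 * makes the dereference safe on architectures such as Alpha. */
	static int consume(void)
	{
		struct msg *m = shared_msg;

		smp_read_barrier_depends();
		return m ? m->payload : -1;
	}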