Diffstat (limited to 'include/asm-i386/system.h')
-rw-r--r--	include/asm-i386/system.h | 313
1 file changed, 0 insertions(+), 313 deletions(-)
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
deleted file mode 100644
index d69ba937e092..000000000000
--- a/include/asm-i386/system.h
+++ /dev/null
@@ -1,313 +0,0 @@
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)
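
/*
 * Illustrative sketch, not part of the original header: roughly how the
 * scheduler invokes switch_to().  Loosely modeled on context_switch() in
 * kernel/sched.c of this era; the mm switch, runqueue locking and
 * accounting are omitted, the wrapper name is hypothetical, and
 * <linux/sched.h> is assumed to be included for the full task_struct.
 */
static inline struct task_struct *
example_context_switch(struct task_struct *prev, struct task_struct *next)
{
	/*
	 * "last" receives the task this CPU actually switched away from,
	 * which may no longer be "prev" by the time we run here again.
	 */
	switch_to(prev, next, prev);
	return prev;
}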

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
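
/*
 * Illustrative sketch, not part of the original header: the usual pairing
 * of savesegment()/loadsegment(), as done for %gs when switching threads
 * in __switch_to() (arch/i386/kernel/process.c).  The helper name is
 * hypothetical, the thread_struct field name is assumed from the i386
 * code of this era, and <linux/sched.h> is assumed to be included.
 */
static inline void example_switch_gs(struct task_struct *prev_p,
				     struct task_struct *next_p)
{
	/* remember the outgoing thread's %gs selector */
	savesegment(gs, prev_p->thread.gs);
	/* install the incoming one; a faulting selector falls back to 0 */
	loadsegment(gs, next_p->thread.gs);
}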


static inline void native_clts(void)
{
	asm volatile ("clts");
}

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("movl %0,%%cr0": :"r" (val));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("movl %0,%%cr2": :"r" (val));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("movl %0,%%cr3": :"r" (val));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist */
	asm("1: movl %%cr4, %0		\n"
	    "2:				\n"
	    ".section __ex_table,\"a\"	\n"
	    ".long 1b,2b		\n"
	    ".previous			\n"
	    : "=r" (val): "0" (0));
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("movl %0,%%cr4": :"r" (val));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}


#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())
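
/*
 * Illustrative sketch, not part of the original header: the literal 8 in
 * stts() is CR0.TS (bit 3).  Code that uses the FPU/SSE inside the kernel
 * typically clears TS first so FP instructions do not fault, and sets it
 * again afterwards so the next FP use traps to #NM for lazy state
 * switching (compare kernel_fpu_begin()/kernel_fpu_end()).  The helper
 * name is hypothetical.
 */
static inline void example_fpu_critical_section(void)
{
	clts();		/* clear CR0.TS: FPU/MMX/SSE usable without faulting */
	/* ... touch FPU/MMX/SSE state here ... */
	stts();		/* set CR0.TS again: next FP instruction raises #NM */
}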

#endif	/* __KERNEL__ */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */


#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it a possibility already. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
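
/*
 * Illustrative sketch, not part of the original header: the classic
 * smp_wmb()/smp_rmb() pairing for publishing data behind a flag.  On UP
 * builds both collapse to compiler barriers, which is all that is needed
 * there.  All names are hypothetical.
 */
static int example_payload;
static int example_ready;

static inline void example_publish(int v)
{
	example_payload = v;
	smp_wmb();		/* order the data store before the flag store */
	example_ready = 1;
}

static inline int example_try_consume(int *v)
{
	if (!example_ready)
		return 0;
	smp_rmb();		/* order the flag read before the data read */
	*v = example_payload;
	return 1;
}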

#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif