Diffstat (limited to 'include/asm-i386/system.h')
 -rw-r--r--   include/asm-i386/system.h   473
 1 file changed, 473 insertions, 0 deletions
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
new file mode 100644
index 000000000000..6f74d4c44a0e
--- /dev/null
+++ b/include/asm-i386/system.h
@@ -0,0 +1,473 @@
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"					\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )

static inline unsigned long _get_base(char * addr)
{
	unsigned long __base;
	__asm__("movb %3,%%dh\n\t"
		"movb %2,%%dl\n\t"
		"shll $16,%%edx\n\t"
		"movw %1,%%dx"
		:"=&d" (__base)
		:"m" (*((addr)+2)),
		 "m" (*((addr)+4)),
		 "m" (*((addr)+7)));
	return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"m" (*(unsigned int *)&(value)))
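
/*
 * Illustrative usage sketch (not part of this header): loadsegment() is
 * meant for reloading a user segment register with a selector that may be
 * stale or invalid; the .fixup entry loads the null selector instead of
 * letting the fault kill the kernel.  The helper and variable names below
 * are made up for illustration.
 */
#if 0	/* example only */
static inline void example_load_user_fs(unsigned int selector)
{
	/* If 'selector' cannot be loaded, the fixup path loads 0 into %fs. */
	loadsegment(fs, selector);
}
#endif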

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__("movl %0,%%cr0": :"r" (x));

#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())
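
/*
 * Illustrative sketch (not part of this header): TS is bit 3 of CR0, which
 * is why stts() writes back read_cr0() | 8 and clts() clears it again.  A
 * hypothetical helper that just inspects the bit could look like this.
 */
#if 0	/* example only */
static inline int example_ts_is_set(void)
{
	return (read_cr0() & 8) != 0;	/* CR0.TS */
}
#endif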

#endif	/* __KERNEL__ */

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


/*
 * The semantics of cmpxchg8b are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		:	"D"(ptr),
			"b"(low),
			"c"(high)
		:	"ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
				 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
			 unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
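
/*
 * Illustrative sketch (not part of this header): set_64bit() lets a reader
 * that fetches the whole quadword (e.g. with cmpxchg8b) always see a
 * coherent 64-bit value on 32-bit SMP.  The names below are made up.
 */
#if 0	/* example only */
static unsigned long long example_shared_stamp;

static inline void example_publish_stamp(unsigned long long v)
{
	set_64bit(&example_shared_stamp, v);	/* atomic 64-bit store */
}
#endif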

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	  but generally the primitive is invalid: *ptr is an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
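
/*
 * Illustrative sketch (not part of this header): xchg() atomically swaps
 * in a new value and returns the old one, so claiming a simple flag word
 * is just a swap with 1 -- which is exactly what tas() expands to.  The
 * names below are made up for illustration.
 */
#if 0	/* example only */
static int example_claim_word;

static inline int example_try_claim(void)
{
	/* Returns nonzero if the word was already claimed by someone else. */
	return xchg(&example_claim_word, 1);
}
#endif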

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
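
/*
 * Illustrative sketch (not part of this header): the usual cmpxchg()
 * pattern is a read-modify-write retry loop, where success is detected by
 * comparing the returned value with the expected old value.
 */
#if 0	/* example only */
static inline void example_atomic_add(volatile unsigned long *p, unsigned long n)
{
	unsigned long old, new;

	do {
		old = *p;
		new = old + n;
	} while (cmpxchg(p, old, new) != old);	/* retry if someone raced us */
}
#endif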

#ifdef __KERNEL__
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
	__u8  pad;
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length
 * of newinstr. It can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      "  .align 4\n"					\
		      "  .long 661b\n"            /* label */		\
		      "  .long 663f\n"            /* new instruction */	\
		      "  .byte %c0\n"             /* feature bit */	\
		      "  .byte 662b-661b\n"       /* sourcelen */	\
		      "  .byte 664f-663f\n"       /* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature) : "memory")
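
/*
 * Illustrative note (not part of this header): mb() and rmb() further down
 * are the users of alternative() in this file -- the locked add is the
 * always-safe oldinstr, and mfence/lfence is patched in at boot when the
 * CPU advertises X86_FEATURE_XMM2.  A hypothetical extra user would follow
 * the same shape:
 */
#if 0	/* example only */
#define example_serialize() \
	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#endif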

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r").
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      "  .align 4\n"					\
		      "  .long 661b\n"            /* label */		\
		      "  .long 663f\n"            /* new instruction */	\
		      "  .byte %c0\n"             /* feature bit */	\
		      "  .byte 662b-661b\n"       /* sourcelen */	\
		      "  .byte 664f-663f\n"       /* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature), ##input)
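
/*
 * Illustrative sketch (not part of this header): a prefetch-style helper is
 * the typical alternative_input() user -- a run of nops of at least the
 * replacement's length is patched into a prefetch when the CPU has SSE.
 * Per the note above, %1 refers to the first entry of 'input'.  The helper
 * name is made up for illustration.
 */
#if 0	/* example only */
static inline void example_prefetch(const void *x)
{
	alternative_input("nop; nop; nop; nop",		/* oldinstr, >= newinstr */
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
#endif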

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */


/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)

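/*
 * Illustrative sketch (not part of this header): the classic use of the
 * smp_*() barriers is publishing data behind a flag -- the writer orders
 * "data, then flag" with smp_wmb(), the reader orders "flag, then data"
 * with smp_rmb().  The names below are made up for illustration.
 */
#if 0	/* example only */
static int example_data, example_ready;

static inline void example_publish(int v)
{
	example_data = v;
	smp_wmb();		/* make the data visible before the flag */
	example_ready = 1;
}

static inline int example_consume(int *v)
{
	if (!example_ready)
		return 0;
	smp_rmb();		/* read the flag before reading the data */
	*v = example_data;
	return 1;
}
#endif
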
/* interrupt control.. */
#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!(flags & (1<<9));		\
})

/* For spinlocks etc */
#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
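
/*
 * Illustrative sketch (not part of this header): the save/restore pair is
 * the normal way to protect a short critical section that may already run
 * with interrupts disabled; 'flags' must be an unsigned long.  The helper
 * name is made up for illustration.
 */
#if 0	/* example only */
static inline void example_irq_protected_store(unsigned long *p, unsigned long v)
{
	unsigned long flags;

	local_irq_save(flags);		/* cli, remembering the previous IF state */
	*p = v;				/* short critical section */
	local_irq_restore(flags);	/* re-enables only if it was enabled before */
}
#endif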

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);

#endif