Diffstat (limited to 'include/asm-i386/system.h')
-rw-r--r--	include/asm-i386/system.h	89
1 files changed, 55 insertions(+), 34 deletions(-)
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 0249f912a29c..a6dabbcd6e6a 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -11,9 +11,14 @@
 struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
 extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
 
+/*
+ * Saving eflags is important. It switches not only IOPL between tasks,
+ * it also protects other tasks from NT leaking through sysenter etc.
+ */
 #define switch_to(prev,next,last) do {				\
 	unsigned long esi,edi;					\
-	asm volatile("pushl %%ebp\n\t"				\
+	asm volatile("pushfl\n\t"	/* Save flags */	\
+		     "pushl %%ebp\n\t"				\
 		     "movl %%esp,%0\n\t"	/* save ESP */	\
 		     "movl %5,%%esp\n\t"	/* restore ESP */	\
 		     "movl $1f,%1\n\t"		/* save EIP */	\
@@ -21,6 +26,7 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		     "jmp __switch_to\n"			\
 		     "1:\t"					\
 		     "popl %%ebp\n\t"				\
+		     "popfl"					\
 		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
 		      "=a" (last),"=S" (esi),"=D" (edi)		\
 		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
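Note on the two hunks above: EFLAGS is now pushed onto the outgoing task's kernel stack and popped on the incoming one, so per-task IOPL is switched and a stray NT bit set by one task cannot leak into another task's sysenter/iret path. A minimal userspace sketch of the failure mode this guards against (illustrative only, not part of the patch; assumes an i386 build):

	/* Set EFLAGS.NT (bit 14) from user mode, then stay runnable so the
	 * scheduler switches away while the bit is set. */
	#include <unistd.h>

	int main(void)
	{
		asm volatile("pushfl\n\t"
			     "orl $0x4000,(%%esp)\n\t"	/* NT flag */
			     "popfl" ::: "cc", "memory");
		getpid();		/* enter the kernel at least once */
		for (;;)
			;		/* keep running across context switches */
		return 0;
	}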
@@ -82,10 +88,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define savesegment(seg, value) \
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
 #define read_cr0() ({ \
 	unsigned int __dummy; \
 	__asm__ __volatile__( \
@@ -94,7 +96,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr0(x) \
-	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
 
 #define read_cr2() ({ \
 	unsigned int __dummy; \
@@ -104,7 +106,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr2(x) \
-	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
 
 #define read_cr3() ({ \
 	unsigned int __dummy; \
@@ -114,7 +116,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr3(x) \
-	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr3": :"r" (x))
 
 #define read_cr4() ({ \
 	unsigned int __dummy; \
@@ -123,7 +125,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 		:"=r" (__dummy)); \
 	__dummy; \
 })
-
 #define read_cr4_safe() ({ \
 	unsigned int __dummy; \
 	/* This could fault if %cr4 does not exist */ \
@@ -135,15 +136,19 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	: "=r" (__dummy): "0" (0)); \
 	__dummy; \
 })
-
 #define write_cr4(x) \
-	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
 #define stts() write_cr0(8 | read_cr0())
 
 #endif /* __KERNEL__ */
 
 #define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory");
+	__asm__ __volatile__ ("wbinvd": : :"memory")
 
 static inline unsigned long get_limit(unsigned long segment)
 {
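The cr0/cr2/cr3/cr4 and wbinvd hunks all make the same mechanical change: the trailing semicolon moves out of the macro body and back to the call site. A brief illustration of why that matters (a sketch, not from the patch; the condition name is invented): with the old form, the macro plus the caller's own semicolon left an empty statement that broke a brace-less if/else.

	/* hypothetical caller; "lazy_fpu_possible" is an invented condition */
	if (lazy_fpu_possible)
		stts();		/* expands through write_cr0(); now parses here */
	else
		clts();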
@@ -262,6 +267,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 #define cmpxchg(ptr,o,n)\
 	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
 					(unsigned long)(n),sizeof(*(ptr))))
+#define sync_cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -291,6 +299,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return old;
 }
 
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("lock; cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
 #ifndef CONFIG_X86_CMPXCHG
 /*
  * Building a kernel capable running on 80386. It may be necessary to
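A hedged usage sketch for the new sync_cmpxchg() (not from the patch; struct shared_info, its flags field and VCPU_PENDING are made-up names): the point is that the locked cmpxchg is used even on a UP guest, because the hypervisor may update the shared word from another physical CPU.

	static int claim_pending(struct shared_info *shared)
	{
		unsigned long old, new;

		do {
			old = shared->flags;
			if (!(old & VCPU_PENDING))
				return 0;		/* nothing to claim */
			new = old & ~VCPU_PENDING;
			/* locked compare-and-swap even if this kernel is UP */
		} while (sync_cmpxchg(&shared->flags, old, new) != old);

		return 1;
	}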
@@ -427,7 +468,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b".  Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
  **/
 
 #define read_barrier_depends()	do { } while(0)
@@ -454,27 +495,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif
 
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-/* interrupt control.. */
-#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
-#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
-#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt()			__asm__ __volatile__("hlt": : :"memory")
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	!(flags & (1<<9));		\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+#include <linux/irqflags.h>
 
 /*
  * disable hlt during certain critical i/o operations
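The last hunk replaces the open-coded cli/sti/pushfl macros with the consolidated <linux/irqflags.h>; the caller-visible API is unchanged. Typical use still looks like this (sketch):

	unsigned long flags;

	local_irq_save(flags);		/* pushfl ; popl ; cli underneath */
	/* ... touch state shared with interrupt context ... */
	local_irq_restore(flags);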