Diffstat (limited to 'include/asm-x86/system.h')
-rw-r--r--	include/asm-x86/system.h	115
1 file changed, 64 insertions(+), 51 deletions(-)
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 9cff02ffe6c2..a2f04cd79b29 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -27,22 +27,44 @@ struct task_struct *__switch_to(struct task_struct *prev,
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
  */
-#define switch_to(prev, next, last) do {				\
-	unsigned long esi, edi;						\
-	asm volatile("pushfl\n\t"		/* Save flags */	\
-		     "pushl %%ebp\n\t"					\
-		     "movl %%esp,%0\n\t"	/* save ESP */		\
-		     "movl %5,%%esp\n\t"	/* restore ESP */	\
-		     "movl $1f,%1\n\t"		/* save EIP */		\
-		     "pushl %6\n\t"		/* restore EIP */	\
-		     "jmp __switch_to\n"				\
-		     "1:\t"						\
-		     "popl %%ebp\n\t"					\
-		     "popfl"						\
-		     :"=m" (prev->thread.sp), "=m" (prev->thread.ip),	\
-		      "=a" (last), "=S" (esi), "=D" (edi)		\
-		     :"m" (next->thread.sp), "m" (next->thread.ip),	\
-		      "2" (prev), "d" (next));				\
+#define switch_to(prev, next, last)					\
+do {									\
+	/*								\
+	 * Context-switching clobbers all registers, so we clobber	\
+	 * them explicitly, via unused output variables.		\
+	 * (EAX and EBP is not listed because EBP is saved/restored	\
+	 *  explicitly for wchan access and EAX is the return value of	\
+	 *  __switch_to())						\
+	 */								\
+	unsigned long ebx, ecx, edx, esi, edi;				\
+									\
+	asm volatile("pushfl\n\t"		/* save flags */	\
+		     "pushl %%ebp\n\t"		/* save EBP */		\
+		     "movl %%esp,%[prev_sp]\n\t"  /* save ESP */	\
+		     "movl %[next_sp],%%esp\n\t"  /* restore ESP */	\
+		     "movl $1f,%[prev_ip]\n\t"	/* save EIP */		\
+		     "pushl %[next_ip]\n\t"	/* restore EIP */	\
+		     "jmp __switch_to\n"	/* regparm call */	\
+		     "1:\t"						\
+		     "popl %%ebp\n\t"		/* restore EBP */	\
+		     "popfl\n"			/* restore flags */	\
+									\
+		     /* output parameters */				\
+		     : [prev_sp] "=m" (prev->thread.sp),		\
+		       [prev_ip] "=m" (prev->thread.ip),		\
+		       "=a" (last),					\
+									\
+		       /* clobbered output registers: */		\
+		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
+		       "=S" (esi), "=D" (edi)				\
+									\
+		       /* input parameters: */				\
+		     : [next_sp] "m" (next->thread.sp),			\
+		       [next_ip] "m" (next->thread.ip),			\
+									\
+		       /* regparm parameters for __switch_to(): */	\
+		       [prev] "a" (prev),				\
+		       [next] "d" (next));				\
 } while (0)
 
 /*
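The rewritten switch_to() leans on two GCC inline-asm features: symbolic (named) operands, written %[name] in the template and [name] in the constraint lists, and marking registers as clobbered by binding them to otherwise unused output variables. A minimal user-space sketch of both techniques, assuming nothing from the kernel; the function and operand names here are invented for illustration only:

#include <stdio.h>

static unsigned int add_shift(unsigned int a, unsigned int b)
{
	unsigned int sum, scratch;

	asm("movl %[in_a], %[tmp]\n\t"	/* tmp  = a    */
	    "addl %[in_b], %[tmp]\n\t"	/* tmp += b    */
	    "shll $1, %[tmp]\n\t"	/* tmp <<= 1   */
	    "movl %[tmp], %[out]"	/* out  = tmp  */
	    : [out] "=r" (sum),
	      [tmp] "=&c" (scratch)	/* unused output: marks ECX as clobbered */
	    : [in_a] "r" (a),
	      [in_b] "r" (b));

	(void)scratch;			/* only exists for the clobber */
	return sum;
}

int main(void)
{
	printf("%u\n", add_shift(3, 4));	/* prints 14 */
	return 0;
}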
@@ -122,35 +144,34 @@ extern void load_gs_index(unsigned);
  */
 #define loadsegment(seg, value)			\
 	asm volatile("\n"			\
 		     "1:\t"			\
 		     "movl %k0,%%" #seg "\n"	\
 		     "2:\n"			\
 		     ".section .fixup,\"ax\"\n"	\
 		     "3:\t"			\
 		     "movl %k1, %%" #seg "\n\t"	\
 		     "jmp 2b\n"			\
 		     ".previous\n"		\
 		     _ASM_EXTABLE(1b,3b)	\
 		     : :"r" (value), "r" (0))
 
 
 /*
  * Save a segment register away
  */
 #define savesegment(seg, value)				\
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
-	__asm__("lsll %1,%0"
-		:"=r" (__limit):"r" (segment));
-	return __limit+1;
+	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+	return __limit + 1;
 }
 
 static inline void native_clts(void)
 {
-	asm volatile ("clts");
+	asm volatile("clts");
 }
 
 /*
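get_limit() now uses the plain asm keyword with its constraints laid out on one line; the lsl instruction asks the CPU for a segment descriptor's limit. A hedged user-space sketch of the same constraint style, probing the current code segment (the whole program is made up for illustration, and the printed value is the raw descriptor limit, page-granular if the G bit is set):

#include <stdio.h>

int main(void)
{
	unsigned int cs, limit;

	asm("mov %%cs,%0" : "=r" (cs));			/* savesegment()-style read */
	asm("lsll %1,%0" : "=r" (limit) : "r" (cs));	/* get_limit()-style query  */

	printf("cs=%#x raw limit=%#x\n", cs, limit);
	return 0;
}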
@@ -165,43 +186,43 @@ static unsigned long __force_order;
 static inline unsigned long native_read_cr0(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
 static inline void native_write_cr0(unsigned long val)
 {
-	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr2(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
 static inline void native_write_cr2(unsigned long val)
 {
-	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr3(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
 static inline void native_write_cr3(unsigned long val)
 {
-	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr4(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
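Each accessor names __force_order as a dummy memory operand (an output for the reads, an input for the writes), which gives the compiler an artificial dependency that keeps these asm statements in program order without a full "memory" clobber. Typical callers do a read-modify-write of a control-register bit; a sketch of that usage (the helper name is invented, and X86_CR0_TS is normally taken from <asm/processor-flags.h> rather than redefined):

#define X86_CR0_TS	0x00000008	/* CR0.TS: task switched, bit 3 */

static inline void set_cr0_ts(void)
{
	/* read CR0, set the TS bit, write it back -- ring 0 only */
	native_write_cr0(native_read_cr0() | X86_CR0_TS);
}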
@@ -213,7 +234,7 @@ static inline unsigned long native_read_cr4_safe(void)
 #ifdef CONFIG_X86_32
 	asm volatile("1: mov %%cr4, %0\n"
 		     "2:\n"
-		     _ASM_EXTABLE(1b,2b)
+		     _ASM_EXTABLE(1b, 2b)
 		     : "=r" (val), "=m" (__force_order) : "0" (0));
 #else
 	val = native_read_cr4();
@@ -223,7 +244,7 @@ static inline unsigned long native_read_cr4_safe(void)
 
 static inline void native_write_cr4(unsigned long val)
 {
-	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
 }
 
 #ifdef CONFIG_X86_64
@@ -244,6 +265,7 @@ static inline void native_wbinvd(void)
 {
 	asm volatile("wbinvd": : :"memory");
 }
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
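For context on the #ifdef: with CONFIG_PARAVIRT the accessors above are routed through the asm/paravirt.h hooks, while the #else branch simply maps the generic names onto the native helpers. Roughly the following, quoted from memory rather than from this hunk:

#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define wbinvd()	(native_wbinvd())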
@@ -276,7 +298,7 @@ static inline void clflush(volatile void *__p)
 	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
 }
 
-#define nop() __asm__ __volatile__ ("nop")
+#define nop() asm volatile ("nop")
 
 void disable_hlt(void);
 void enable_hlt(void);
@@ -296,16 +318,7 @@ void default_idle(void);
  */
 #ifdef CONFIG_X86_32
 /*
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
  * nop for these.
  */
 #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
@@ -384,7 +397,7 @@ void default_idle(void);
 # define smp_wmb() barrier()
 #endif
 #define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
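set_mb() stores a value and then acts as a full memory barrier; on x86 it gets both from the single xchg, since a locked read-modify-write is a full fence. Its classic use is the sleep/wakeup handshake, where each side stores its flag, issues a full barrier, and only then reads the other side's flag, so at least one side is guaranteed to observe the other's store. A sketch of that pattern under those assumptions; the function and variable names are invented:

static int waiter_asleep;
static int work_pending;

/* Waiter: announce that we are about to sleep, then look for work. */
static int waiter_should_sleep(void)
{
	set_mb(waiter_asleep, 1);	/* store flag + full barrier         */
	return !work_pending;		/* read strictly after the barrier   */
}

/* Waker: post the work, then decide whether a wakeup is needed. */
static int waker_needs_wakeup(void)
{
	work_pending = 1;
	smp_mb();			/* pairs with set_mb() in the waiter */
	return waiter_asleep;
}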