author    Rusty Russell <rusty@rustcorp.com.au>  2006-12-06 20:14:08 -0500
committer Andi Kleen <andi@basil.nowhere.org>    2006-12-06 20:14:08 -0500
commit    139ec7c416248b9ea227d21839235344edfee1e0 (patch)
tree      54c396848b08367c0352c77f4633be6380a8eb16 /include/asm-i386
parent    d3561b7fa0fb0fc583bab0eeda32bec9e4c4056d (diff)
[PATCH] paravirt: Patch inline replacements for paravirt intercepts
It turns out that the most called ops, by several orders of magnitude,
are the interrupt manipulation ops.  These are obvious candidates for
patching, so mark them up and create infrastructure for it.

The method used is that the ops structure has a patch function, which
is called for each place which needs to be patched: this returns a
number of instructions (the rest are NOP-padded).

Usually we can spare a register (%eax) for the binary patched code to
use, but in a couple of critical places in entry.S we can't: we make
the clobbers explicit at the call site, and manually clobber the
allowed registers in debug mode as an extra check.

And:

Don't abuse CONFIG_DEBUG_KERNEL, add CONFIG_DEBUG_PARAVIRT.

And:

AK: Fix warnings in x86-64 alternative.c build

And:

AK: Fix compilation with defconfig

And:

From: Andrew Morton <akpm@osdl.org>
Some versions of binutils still like to emit references to
__stop_parainstructions and __start_parainstructions.

And:

AK: Fix warnings about unused variables when PARAVIRT is disabled.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
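The mechanism described above reduces to a short driver loop over the
recorded call sites.  As a minimal sketch only (this is not the patch's
actual arch/i386/kernel/alternative.c code, just a restatement of the
contract, using the struct paravirt_patch and paravirt_ops.patch hook
introduced below):

	#include <linux/string.h>	/* memset */
	#include <asm/paravirt.h>	/* struct paravirt_patch, paravirt_ops */

	/* Sketch: paravirt_ops.patch() emits up to len bytes of replacement
	 * code at a recorded call site and returns how many bytes it
	 * produced; the generic code pads the rest of the site with NOPs. */
	static void apply_paravirt_sketch(struct paravirt_patch *start,
					  struct paravirt_patch *end)
	{
		struct paravirt_patch *p;

		for (p = start; p < end; p++) {
			unsigned used = paravirt_ops.patch(p->instrtype,
							   p->clobbers,
							   p->instr, p->len);

			/* Pad the unpatched tail with 1-byte NOPs (0x90). */
			memset(p->instr + used, 0x90, p->len - used);
		}
	}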
Diffstat (limited to 'include/asm-i386')
-rw-r--r--	include/asm-i386/alternative.h	 13
-rw-r--r--	include/asm-i386/desc.h		 41
-rw-r--r--	include/asm-i386/irqflags.h	  4
-rw-r--r--	include/asm-i386/paravirt.h	189
-rw-r--r--	include/asm-i386/processor.h	198
-rw-r--r--	include/asm-i386/spinlock.h	 15
6 files changed, 297 insertions(+), 163 deletions(-)
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index b01a7ec409ce..b8fa9557c532 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -4,7 +4,7 @@
 #ifdef __KERNEL__
 
 #include <asm/types.h>
-
+#include <linux/stddef.h>
 #include <linux/types.h>
 
 struct alt_instr {
@@ -118,4 +118,15 @@ static inline void alternatives_smp_switch(int smp) {}
 #define LOCK_PREFIX ""
 #endif
 
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
 #endif /* _I386_ALTERNATIVE_H */
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index f19820f0834f..f398cc456448 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -81,31 +81,15 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 #undef C
 }
 
-static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
-{
-	__u32 *lp = (__u32 *)((char *)dt + entry*8);
-	*lp = entry_a;
-	*(lp+1) = entry_b;
-}
-
 #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 
-static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
-{
-	__u32 a, b;
-	pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
-	write_idt_entry(idt_table, gate, a, b);
-}
-
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
+static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
 {
-	__u32 a, b;
-	pack_descriptor(&a, &b, (unsigned long)addr,
-		offsetof(struct tss_struct, __cacheline_filler) - 1,
-		DESCTYPE_TSS, 0);
-	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
+	__u32 *lp = (__u32 *)((char *)dt + entry*8);
+	*lp = entry_a;
+	*(lp+1) = entry_b;
 }
 
 #define set_ldt native_set_ldt
@@ -128,6 +112,23 @@ static inline fastcall void native_set_ldt(const void *addr,
 	}
 }
 
+static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
+{
+	__u32 a, b;
+	pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
+	write_idt_entry(idt_table, gate, a, b);
+}
+
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
+{
+	__u32 a, b;
+	pack_descriptor(&a, &b, (unsigned long)addr,
+		offsetof(struct tss_struct, __cacheline_filler) - 1,
+		DESCTYPE_TSS, 0);
+	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
+}
+
+
 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
 
 #define LDT_entry_a(info) \
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index 9ce01f3fb7bc..17b18cf4fe9d 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -79,8 +79,8 @@ static inline unsigned long __raw_local_irq_save(void)
 }
 
 #else
-#define DISABLE_INTERRUPTS		cli
-#define ENABLE_INTERRUPTS		sti
+#define DISABLE_INTERRUPTS(clobbers)	cli
+#define ENABLE_INTERRUPTS(clobbers)	sti
 #define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
 #define INTERRUPT_RETURN		iret
 #define GET_CR0_INTO_EAX		movl %cr0, %eax
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index a7551a44686f..081194751ade 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -3,8 +3,26 @@
 /* Various instructions on x86 need to be replaced for
  * para-virtualization: those hooks are defined here. */
 #include <linux/linkage.h>
+#include <linux/stringify.h>
 
 #ifdef CONFIG_PARAVIRT
+/* These are the most performance critical ops, so we want to be able to patch
+ * callers */
+#define PARAVIRT_IRQ_DISABLE 0
+#define PARAVIRT_IRQ_ENABLE 1
+#define PARAVIRT_RESTORE_FLAGS 2
+#define PARAVIRT_SAVE_FLAGS 3
+#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
+#define PARAVIRT_INTERRUPT_RETURN 5
+#define PARAVIRT_STI_SYSEXIT 6
+
+/* Bitmask of what can be clobbered: usually at least eax. */
+#define CLBR_NONE 0x0
+#define CLBR_EAX 0x1
+#define CLBR_ECX 0x2
+#define CLBR_EDX 0x4
+#define CLBR_ANY 0x7
+
 #ifndef __ASSEMBLY__
 struct thread_struct;
 struct Xgt_desc_struct;
@@ -15,6 +33,15 @@ struct paravirt_ops
 	int paravirt_enabled;
 	const char *name;
 
+	/*
+	 * Patch may replace one of the defined code sequences with arbitrary
+	 * code, subject to the same register constraints.  This generally
+	 * means the code is not free to clobber any registers other than EAX.
+	 * The patch function should return the number of bytes of code
+	 * generated, as we nop pad the rest in generic code.
+	 */
+	unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
+
 	void (*arch_setup)(void);
 	char *(*memory_setup)(void);
 	void (*init_IRQ)(void);
@@ -147,35 +174,6 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
 #define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
 #define write_cr4(x) paravirt_ops.write_cr4(x)
 
-static inline unsigned long __raw_local_save_flags(void)
-{
-	return paravirt_ops.save_fl();
-}
-
-static inline void raw_local_irq_restore(unsigned long flags)
-{
-	return paravirt_ops.restore_fl(flags);
-}
-
-static inline void raw_local_irq_disable(void)
-{
-	paravirt_ops.irq_disable();
-}
-
-static inline void raw_local_irq_enable(void)
-{
-	paravirt_ops.irq_enable();
-}
-
-static inline unsigned long __raw_local_irq_save(void)
-{
-	unsigned long flags = paravirt_ops.save_fl();
-
-	paravirt_ops.irq_disable();
-
-	return flags;
-}
-
 static inline void raw_safe_halt(void)
 {
 	paravirt_ops.safe_halt();
@@ -267,15 +265,134 @@ static inline void slow_down_io(void) {
 #endif
 }
 
-#define CLI_STRING "pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_disable; popl %edx; popl %ecx; popl %eax"
-#define STI_STRING "pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_enable; popl %edx; popl %ecx; popl %eax"
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch {
+	u8 *instr;		/* original instructions */
+	u8 instrtype;		/* type of this instruction */
+	u8 len;			/* length of original instruction */
+	u16 clobbers;		/* what registers you may clobber */
+};
+
+#define paravirt_alt(insn_string, typenum, clobber)	\
+	"771:\n\t" insn_string "\n" "772:\n"		\
+	".pushsection .parainstructions,\"a\"\n"	\
+	"  .long 771b\n"				\
+	"  .byte " __stringify(typenum) "\n"		\
+	"  .byte 772b-771b\n"				\
+	"  .short " __stringify(clobber) "\n"		\
+	".popsection"
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long f;
+
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%1;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_SAVE_FLAGS, CLBR_NONE)
+			     : "=a"(f): "m"(paravirt_ops.save_fl)
+			     : "memory", "cc");
+	return f;
+}
+
+static inline void raw_local_irq_restore(unsigned long f)
+{
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%1;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_RESTORE_FLAGS, CLBR_EAX)
+			     : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f)
+			     : "memory", "cc");
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%0;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+			     : : "m" (paravirt_ops.irq_disable)
+			     : "memory", "eax", "cc");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%0;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+			     : : "m" (paravirt_ops.irq_enable)
+			     : "memory", "eax", "cc");
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long f;
+
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%1; pushl %%eax;"
+					   "call *%2; popl %%eax;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
+					  CLBR_NONE)
+			     : "=a"(f)
+			     : "m" (paravirt_ops.save_fl),
+			       "m" (paravirt_ops.irq_disable)
+			     : "memory", "cc");
+	return f;
+}
+
+#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
+			"call *paravirt_ops+%c[irq_disable];"		\
+			"popl %%edx; popl %%ecx",			\
+			PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+
+#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
+			"call *paravirt_ops+%c[irq_enable];"		\
+			"popl %%edx; popl %%ecx",			\
+			PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+#define CLI_STI_CLOBBERS , "%eax"
+#define CLI_STI_INPUT_ARGS \
+	,								\
+	[irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)), \
+	[irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))
+
 #else  /* __ASSEMBLY__ */
 
-#define INTERRUPT_RETURN jmp *%cs:paravirt_ops+PARAVIRT_iret
-#define DISABLE_INTERRUPTS pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_disable; popl %edx; popl %ecx; popl %eax
-#define ENABLE_INTERRUPTS pushl %eax; pushl %ecx; pushl %edx; call *%cs:paravirt_ops+PARAVIRT_irq_enable; popl %edx; popl %ecx; popl %eax
-#define ENABLE_INTERRUPTS_SYSEXIT jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit
-#define GET_CR0_INTO_EAX call *paravirt_ops+PARAVIRT_read_cr0
+#define PARA_PATCH(ptype, clobbers, ops)	\
+771:;						\
+	ops;					\
+772:;						\
+	.pushsection .parainstructions,"a";	\
+	 .long 771b;				\
+	 .byte ptype;				\
+	 .byte 772b-771b;			\
+	 .short clobbers;			\
+	.popsection
+
+#define INTERRUPT_RETURN				\
+	PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY,	\
+	jmp *%cs:paravirt_ops+PARAVIRT_iret)
+
+#define DISABLE_INTERRUPTS(clobbers)			\
+	PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers,	\
+	pushl %ecx; pushl %edx;				\
+	call *paravirt_ops+PARAVIRT_irq_disable;	\
+	popl %edx; popl %ecx)				\
+
+#define ENABLE_INTERRUPTS(clobbers)			\
+	PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers,	\
+	pushl %ecx; pushl %edx;				\
+	call *%cs:paravirt_ops+PARAVIRT_irq_enable;	\
+	popl %edx; popl %ecx)
+
+#define ENABLE_INTERRUPTS_SYSEXIT			\
+	PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY,	\
+	jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
+
+#define GET_CR0_INTO_EAX			\
+	call *paravirt_ops+PARAVIRT_read_cr0
+
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_PARAVIRT */
 #endif	/* __ASM_PARAVIRT_H */
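The patch hook's contract above (emit replacement code, return the byte
count, let the generic code NOP-pad the rest) suggests what a backend
implementation looks like.  A hypothetical sketch, not this patch's
backend code; the function name and the choice of which sites to inline
are assumptions:

	/* Illustrative backend patch routine: inline cli/sti where the site
	 * is big enough, otherwise return len to keep the original call
	 * sequence (returning len means nothing gets NOP-padded). */
	static unsigned example_patch(u8 type, u16 clobbers,
				      void *firstinsn, unsigned len)
	{
		u8 *insn = firstinsn;

		switch (type) {
		case PARAVIRT_IRQ_DISABLE:
			if (len < 1)
				return len;	/* no room: leave the site alone */
			insn[0] = 0xfa;		/* cli */
			return 1;
		case PARAVIRT_IRQ_ENABLE:
			if (len < 1)
				return len;
			insn[0] = 0xfb;		/* sti */
			return 1;
		default:
			return len;
		}
	}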
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 6c2c4457be0a..5f0418d0078c 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -156,59 +156,6 @@ static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
 		: "0" (*eax), "2" (*ecx));
 }
 
-/*
- * Generic CPUID function
- * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
- * resulting in stale register contents being returned.
- */
-static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-{
-	*eax = op;
-	*ecx = 0;
-	__cpuid(eax, ebx, ecx, edx);
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-		int *edx)
-{
-	*eax = op;
-	*ecx = count;
-	__cpuid(eax, ebx, ecx, edx);
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return edx;
-}
-
 #define load_cr3(pgdir) write_cr3(__pa(pgdir))
 
 /*
@@ -491,22 +438,6 @@ struct thread_struct {
 	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },	\
 }
 
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define paravirt_enabled() 0
-#define __cpuid native_cpuid
-
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-{
-	tss->esp0 = thread->esp0;
-	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-		tss->ss1 = thread->sysenter_cs;
-		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-	}
-}
-
 #define start_thread(regs, new_eip, new_esp) do {		\
 	__asm__("movl %0,%%fs": :"r" (0));			\
 	regs->xgs = 0;						\
@@ -519,36 +450,6 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
 	regs->esp = new_esp;					\
 } while (0)
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register)			\
-		__asm__("movl %%db" #register ", %0"	\
-			:"=r" (var))
-#define set_debugreg(value, register)			\
-		__asm__("movl %0,%%db" #register	\
-			: /* no output */		\
-			:"r" (value))
-
-#define set_iopl_mask native_set_iopl_mask
-#endif /* CONFIG_PARAVIRT */
-
-/*
- * Set IOPL bits in EFLAGS from given mask
- */
-static fastcall inline void native_set_iopl_mask(unsigned mask)
-{
-	unsigned int reg;
-	__asm__ __volatile__ ("pushfl;"
-			      "popl %0;"
-			      "andl %1, %0;"
-			      "orl %2, %0;"
-			      "pushl %0;"
-			      "popfl"
-				: "=&r" (reg)
-				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
-}
-
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
@@ -640,6 +541,105 @@ static inline void rep_nop(void)
 
 #define cpu_relax()	rep_nop()
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
+#define __cpuid native_cpuid
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+	tss->esp0 = thread->esp0;
+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+		tss->ss1 = thread->sysenter_cs;
+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+	}
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register)			\
+		__asm__("movl %%db" #register ", %0"	\
+			:"=r" (var))
+#define set_debugreg(value, register)			\
+		__asm__("movl %0,%%db" #register	\
+			: /* no output */		\
+			:"r" (value))
+
+#define set_iopl_mask native_set_iopl_mask
+#endif /* CONFIG_PARAVIRT */
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static fastcall inline void native_set_iopl_mask(unsigned mask)
+{
+	unsigned int reg;
+	__asm__ __volatile__ ("pushfl;"
+			      "popl %0;"
+			      "andl %1, %0;"
+			      "orl %2, %0;"
+			      "pushl %0;"
+			      "popfl"
+				: "=&r" (reg)
+				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
+
+/*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+{
+	*eax = op;
+	*ecx = 0;
+	__cpuid(eax, ebx, ecx, edx);
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+		int *edx)
+{
+	*eax = op;
+	*ecx = count;
+	__cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return edx;
+}
+
 /* generic versions from gas */
 #define GENERIC_NOP1	".byte 0x90\n"
 #define GENERIC_NOP2	".byte 0x89,0xf6\n"
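One effect of relocating these helpers below the paravirt block: cpuid()
and the cpuid_*() accessors now bind to whichever __cpuid is in force,
native_cpuid() in the plain build or the paravirt hook when
<asm/paravirt.h> was included above.  A small usage sketch (the function
name is hypothetical; CPUID leaf 1, EDX bit 4 = TSC is architectural):

	/* Callers are unchanged whichever __cpuid is in effect. */
	static inline int has_tsc_example(void)
	{
		unsigned int eax, ebx, ecx, edx;

		cpuid(1, &eax, &ebx, &ecx, &edx);	/* leaf 1: feature flags */
		return (edx >> 4) & 1;			/* EDX bit 4: TSC present */
	}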
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index dea60709db29..d3bcebed60ca 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -12,6 +12,8 @@
 #else
 #define CLI_STRING	"cli"
 #define STI_STRING	"sti"
+#define CLI_STI_CLOBBERS
+#define CLI_STI_INPUT_ARGS
 #endif /* CONFIG_PARAVIRT */
 
 /*
@@ -57,25 +59,28 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 {
 	asm volatile(
 		"\n1:\t"
-		LOCK_PREFIX " ; decb %0\n\t"
+		LOCK_PREFIX " ; decb %[slock]\n\t"
 		"jns 5f\n"
 		"2:\t"
-		"testl $0x200, %1\n\t"
+		"testl $0x200, %[flags]\n\t"
 		"jz 4f\n\t"
 		STI_STRING "\n"
 		"3:\t"
 		"rep;nop\n\t"
-		"cmpb $0, %0\n\t"
+		"cmpb $0, %[slock]\n\t"
 		"jle 3b\n\t"
 		CLI_STRING "\n\t"
 		"jmp 1b\n"
 		"4:\t"
 		"rep;nop\n\t"
-		"cmpb $0, %0\n\t"
+		"cmpb $0, %[slock]\n\t"
 		"jg 1b\n\t"
 		"jmp 4b\n"
 		"5:\n\t"
-		: "+m" (lock->slock) : "r" (flags) : "memory");
+		: [slock] "+m" (lock->slock)
+		: [flags] "r" (flags)
+		  CLI_STI_INPUT_ARGS
+		: "memory" CLI_STI_CLOBBERS);
 }
 #endif
 
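A note on the operand change above: moving from positional %0/%1 to
named [slock]/[flags] operands is what lets CLI_STI_INPUT_ARGS (which
begins with a comma) splice extra inputs onto the list without
renumbering every reference in the asm template.  A standalone
illustration, with all names hypothetical rather than from the patch:

	/* Build with -DDEMO_BIAS to enable the appended operand. */
	#ifdef DEMO_BIAS
	#define DEMO_EXTRA_ASM	"addl %[bias], %[val]\n\t"
	#define DEMO_INPUT_ARGS	, [bias] "i" (8)	/* leading comma, like CLI_STI_INPUT_ARGS */
	#else
	#define DEMO_EXTRA_ASM
	#define DEMO_INPUT_ARGS
	#endif

	static inline int demo_add(int val, int inc)
	{
		asm(DEMO_EXTRA_ASM
		    "addl %[inc], %[val]"
		    : [val] "+r" (val)
		    : [inc] "r" (inc) DEMO_INPUT_ARGS);
		return val;
	}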