author		Rusty Russell <rusty@rustcorp.com.au>	2007-05-02 13:27:10 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 13:27:10 -0400
commit		90a0a06aa81692028864c21f981905fda46b1208 (patch)
tree		516528b328d5288ee057d1eff5491e2ba1b49af1 /include
parent		52de74dd3994e165ef1b35c33d54655a6400e30c (diff)
[PATCH] i386: rationalize paravirt wrappers
paravirt.c used to implement native versions of all low-level functions.
It is far cleaner to expose the native versions in the headers as inline
native_XXX functions and, if !CONFIG_PARAVIRT, simply #define XXX native_XXX.

There are several nice side effects:

1) write_dt_entry() now takes the correct "struct desc_struct *", not
   "void *".

2) load_TLS is reintroduced as a for loop, rather than being manually
   unrolled with a #error in case the bounds ever change.

3) Macros become inlines, with type checking.

4) Access to the native versions is trivial for KVM, lguest, Xen and
   others who might want it.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@muc.de>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
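In miniature, the pattern looks like this (an illustrative sketch distilled
from the desc.h hunk below, using load_gdt as the example; it is not a
verbatim excerpt of any one file):

	/* The header always exposes the native version as a type-checked
	 * inline, available to anyone who wants to call it directly. */
	static inline void native_load_gdt(const struct Xgt_desc_struct *dtr)
	{
		asm volatile("lgdt %0"::"m" (*dtr));
	}

	#ifdef CONFIG_PARAVIRT
	#include <asm/paravirt.h>	/* load_gdt() goes via paravirt_ops */
	#else
	#define load_gdt(dtr) native_load_gdt(dtr)	/* zero-cost alias */
	#endif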
Diffstat (limited to 'include')
 -rw-r--r--  include/asm-i386/desc.h      |  82
 -rw-r--r--  include/asm-i386/io.h        |  15
 -rw-r--r--  include/asm-i386/irqflags.h  |  61
 -rw-r--r--  include/asm-i386/msr.h       | 163
 -rw-r--r--  include/asm-i386/paravirt.h  |  17
 -rw-r--r--  include/asm-i386/processor.h |  94
 -rw-r--r--  include/asm-i386/system.h    | 139
7 files changed, 384 insertions, 187 deletions
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index a75ae6b9786..13f701ea9a8 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -57,45 +57,33 @@ static inline void pack_gate(__u32 *a, __u32 *b,
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-
-#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
-#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
+#define load_TR_desc() native_load_tr_desc()
+#define load_gdt(dtr) native_load_gdt(dtr)
+#define load_idt(dtr) native_load_idt(dtr)
 #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
 #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
 
-#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
-#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+#define store_gdt(dtr) native_store_gdt(dtr)
+#define store_idt(dtr) native_store_idt(dtr)
+#define store_tr(tr) (tr = native_store_tr())
 #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
 
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-	C(0); C(1); C(2);
-#undef C
-}
+#define load_TLS(t, cpu) native_load_tls(t, cpu)
+#define set_ldt native_set_ldt
 
 #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#endif
 
-static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+static inline void write_dt_entry(struct desc_struct *dt,
+				  int entry, u32 entry_low, u32 entry_high)
 {
-	__u32 *lp = (__u32 *)((char *)dt + entry*8);
-	*lp = entry_a;
-	*(lp+1) = entry_b;
+	dt[entry].a = entry_low;
+	dt[entry].b = entry_high;
 }
 
-#define set_ldt native_set_ldt
-#endif /* CONFIG_PARAVIRT */
-
-static inline fastcall void native_set_ldt(const void *addr,
-					   unsigned int entries)
+static inline void native_set_ldt(const void *addr, unsigned int entries)
 {
 	if (likely(entries == 0))
 		__asm__ __volatile__("lldt %w0"::"q" (0));
@@ -111,6 +99,48 @@ static inline fastcall void native_set_ldt(const void *addr,
 	}
 }
 
+
+static inline void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
+static inline void native_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static inline void native_load_idt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static inline void native_store_gdt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sgdt %0":"=m" (*dtr));
+}
+
+static inline void native_store_idt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sidt %0":"=m" (*dtr));
+}
+
+static inline unsigned long native_store_tr(void)
+{
+	unsigned long tr;
+	asm ("str %0":"=r" (tr));
+	return tr;
+}
+
+static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+	unsigned int i;
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+}
+
 static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
 {
 	__u32 a, b;
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 59fe616933c..e797586a5bf 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -250,19 +250,22 @@ static inline void flush_write_buffers(void)
 
 #endif /* __KERNEL__ */
 
+static inline void native_io_delay(void)
+{
+	asm volatile("outb %%al,$0x80" : : : "memory");
+}
+
 #if defined(CONFIG_PARAVIRT)
 #include <asm/paravirt.h>
 #else
 
-#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-
 static inline void slow_down_io(void) {
-	__asm__ __volatile__(
-		__SLOW_DOWN_IO
+	native_io_delay();
 #ifdef REALLY_SLOW_IO
-		__SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+	native_io_delay();
+	native_io_delay();
+	native_io_delay();
 #endif
-		: : );
 }
 
 #endif
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index 17b18cf4fe9..c1cdd094938 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -10,6 +10,42 @@
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
 
+#ifndef __ASSEMBLY__
+static inline unsigned long native_save_fl(void)
+{
+	unsigned long f;
+	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
+	return f;
+}
+
+static inline void native_restore_fl(unsigned long f)
+{
+	asm volatile("pushl %0 ; popfl": /* no output */
+		     :"g" (f)
+		     :"memory", "cc");
+}
+
+static inline void native_irq_disable(void)
+{
+	asm volatile("cli": : :"memory");
+}
+
+static inline void native_irq_enable(void)
+{
+	asm volatile("sti": : :"memory");
+}
+
+static inline void native_safe_halt(void)
+{
+	asm volatile("sti; hlt": : :"memory");
+}
+
+static inline void native_halt(void)
+{
+	asm volatile("hlt": : :"memory");
+}
+#endif	/* __ASSEMBLY__ */
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -17,35 +53,22 @@
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-	unsigned long flags;
-
-	__asm__ __volatile__(
-		"pushfl ; popl %0"
-		: "=g" (flags)
-		: /* no input */
-	);
-
-	return flags;
+	return native_save_fl();
 }
 
 static inline void raw_local_irq_restore(unsigned long flags)
 {
-	__asm__ __volatile__(
-		"pushl %0 ; popfl"
-		: /* no output */
-		:"g" (flags)
-		:"memory", "cc"
-	);
+	native_restore_fl(flags);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	__asm__ __volatile__("cli" : : : "memory");
+	native_irq_disable();
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	__asm__ __volatile__("sti" : : : "memory");
+	native_irq_enable();
 }
 
 /*
@@ -54,7 +77,7 @@ static inline void raw_local_irq_enable(void)
  */
 static inline void raw_safe_halt(void)
 {
-	__asm__ __volatile__("sti; hlt" : : : "memory");
+	native_safe_halt();
 }
 
 /*
@@ -63,7 +86,7 @@ static inline void raw_safe_halt(void)
  */
 static inline void halt(void)
 {
-	__asm__ __volatile__("hlt": : :"memory");
+	native_halt();
 }
 
 /*
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 2ad3f30b1a6..00acaa8b36b 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -1,6 +1,74 @@
 #ifndef __ASM_MSR_H
 #define __ASM_MSR_H
 
+#include <asm/errno.h>
+
+static inline unsigned long long native_read_msr(unsigned int msr)
+{
+	unsigned long long val;
+
+	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
+	return val;
+}
+
+static inline unsigned long long native_read_msr_safe(unsigned int msr,
+						      int *err)
+{
+	unsigned long long val;
+
+	asm volatile("2: rdmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: movl %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     " .align 4\n\t"
+		     " .long 2b,3b\n\t"
+		     ".previous"
+		     : "=r" (*err), "=A" (val)
+		     : "c" (msr), "i" (-EFAULT));
+
+	return val;
+}
+
+static inline void native_write_msr(unsigned int msr, unsigned long long val)
+{
+	asm volatile("wrmsr" : : "c" (msr), "A"(val));
+}
+
+static inline int native_write_msr_safe(unsigned int msr,
+					unsigned long long val)
+{
+	int err;
+	asm volatile("2: wrmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: movl %4,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     " .align 4\n\t"
+		     " .long 2b,3b\n\t"
+		     ".previous"
+		     : "=a" (err)
+		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+		       "i" (-EFAULT));
+	return err;
+}
+
+static inline unsigned long long native_read_tsc(void)
+{
+	unsigned long long val;
+	asm volatile("rdtsc" : "=A" (val));
+	return val;
+}
+
+static inline unsigned long long native_read_pmc(void)
+{
+	unsigned long long val;
+	asm volatile("rdpmc" : "=A" (val));
+	return val;
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -11,22 +79,20 @@
  * pointer indirection), this allows gcc to optimize better
  */
 
-#define rdmsr(msr,val1,val2)					\
-	__asm__ __volatile__("rdmsr"				\
-			     : "=a" (val1), "=d" (val2)		\
-			     : "c" (msr))
+#define rdmsr(msr,val1,val2)					\
+	do {							\
+		unsigned long long __val = native_read_msr(msr);\
+		val1 = __val;					\
+		val2 = __val >> 32;				\
+	} while(0)
 
-#define wrmsr(msr,val1,val2)					\
-	__asm__ __volatile__("wrmsr"				\
-			     : /* no outputs */			\
-			     : "c" (msr), "a" (val1), "d" (val2))
+#define wrmsr(msr,val1,val2)	\
+	native_write_msr(msr, ((unsigned long long)val2 << 32) | val1)
 
-#define rdmsrl(msr,val) do { \
-	unsigned long l__,h__; \
-	rdmsr (msr, l__, h__);  \
-	val = l__;  \
-	val |= ((u64)h__<<32);  \
-} while(0)
+#define rdmsrl(msr,val)						\
+	do {							\
+		(val) = native_read_msr(msr);			\
+	} while(0)
 
 static inline void wrmsrl (unsigned long msr, unsigned long long val)
 {
@@ -37,50 +103,41 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
 }
 
 /* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__;			\
-	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
-		     "1:\n\t"					\
-		     ".section .fixup,\"ax\"\n\t"		\
-		     "3: movl %4,%0 ; jmp 1b\n\t"		\
-		     ".previous\n\t"				\
-		     ".section __ex_table,\"a\"\n"		\
-		     " .align 4\n\t"				\
-		     " .long 2b,3b\n\t"				\
-		     ".previous"				\
-		     : "=a" (ret__)				\
-		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
-	ret__; })
+#define wrmsr_safe(msr,val1,val2)				\
+	(native_write_msr_safe(msr, ((unsigned long long)val2 << 32) | val1))
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({ int ret__;			\
-	asm volatile("2: rdmsr ; xorl %0,%0\n"			\
-		     "1:\n\t"					\
-		     ".section .fixup,\"ax\"\n\t"		\
-		     "3: movl %4,%0 ; jmp 1b\n\t"		\
-		     ".previous\n\t"				\
-		     ".section __ex_table,\"a\"\n"		\
-		     " .align 4\n\t"				\
-		     " .long 2b,3b\n\t"				\
-		     ".previous"				\
-		     : "=r" (ret__), "=a" (*(a)), "=d" (*(b))	\
-		     : "c" (msr), "i" (-EFAULT));\
-	ret__; })
+#define rdmsr_safe(msr,p1,p2)					\
+	({							\
+		int __err;					\
+		unsigned long long __val = native_read_msr_safe(msr, &__err);\
+		(*p1) = __val;					\
+		(*p2) = __val >> 32;				\
+		__err;						\
+	})
 
-#define rdtsc(low,high) \
-	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+#define rdtsc(low,high)						\
+	do {							\
+		u64 _l = native_read_tsc();			\
+		(low) = (u32)_l;				\
+		(high) = _l >> 32;				\
+	} while(0)
 
-#define rdtscl(low) \
-	__asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+#define rdtscl(low)						\
+	do {							\
+		(low) = native_read_tsc();			\
+	} while(0)
 
-#define rdtscll(val) \
-	__asm__ __volatile__("rdtsc" : "=A" (val))
+#define rdtscll(val) ((val) = native_read_tsc())
 
 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
 
-#define rdpmc(counter,low,high) \
-	__asm__ __volatile__("rdpmc" \
-			     : "=a" (low), "=d" (high) \
-			     : "c" (counter))
+#define rdpmc(counter,low,high)					\
+	do {							\
+		u64 _l = native_read_pmc();			\
+		low = (u32)_l;					\
+		high = _l >> 32;				\
+	} while(0)
 #endif /* !CONFIG_PARAVIRT */
 
 #ifdef CONFIG_SMP
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index e63f1e444fc..32acebce9ae 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -29,6 +29,7 @@ struct thread_struct;
 struct Xgt_desc_struct;
 struct tss_struct;
 struct mm_struct;
+struct desc_struct;
 struct paravirt_ops
 {
 	unsigned int kernel_rpl;
@@ -105,14 +106,13 @@ struct paravirt_ops
 	void (*set_ldt)(const void *desc, unsigned entries);
 	unsigned long (*store_tr)(void);
 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
-	void (*write_ldt_entry)(void *dt, int entrynum,
-				u32 low, u32 high);
-	void (*write_gdt_entry)(void *dt, int entrynum,
-				u32 low, u32 high);
-	void (*write_idt_entry)(void *dt, int entrynum,
-				u32 low, u32 high);
-	void (*load_esp0)(struct tss_struct *tss,
-			  struct thread_struct *thread);
+	void (*write_ldt_entry)(struct desc_struct *,
+				int entrynum, u32 low, u32 high);
+	void (*write_gdt_entry)(struct desc_struct *,
+				int entrynum, u32 low, u32 high);
+	void (*write_idt_entry)(struct desc_struct *,
+				int entrynum, u32 low, u32 high);
+	void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);
 
 	void (*set_iopl_mask)(unsigned mask);
 
@@ -232,6 +232,7 @@ static inline void halt(void)
 
 #define get_kernel_rpl() (paravirt_ops.kernel_rpl)
 
+/* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr,val1,val2) do {				\
 	int _err;						\
 	u64 _l = paravirt_ops.read_msr(msr,&_err);		\
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 80f7e8a1e87..96edfdfe32d 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -147,7 +147,7 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
 #define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 
-static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
+static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 				unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
@@ -545,13 +545,7 @@ static inline void rep_nop(void)
 
 #define cpu_relax()	rep_nop()
 
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define paravirt_enabled() 0
-#define __cpuid native_cpuid
-
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->esp0 = thread->esp0;
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
@@ -561,24 +555,60 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
 	}
 }
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register) \
-		__asm__("movl %%db" #register ", %0" \
-			:"=r" (var))
-#define set_debugreg(value, register) \
-		__asm__("movl %0,%%db" #register \
-			: /* no output */ \
-			:"r" (value))
 
-#define set_iopl_mask native_set_iopl_mask
-#endif /* CONFIG_PARAVIRT */
+static inline unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val = 0; 	/* Damn you, gcc! */
+
+	switch (regno) {
+	case 0:
+		asm("movl %%db0, %0" :"=r" (val)); break;
+	case 1:
+		asm("movl %%db1, %0" :"=r" (val)); break;
+	case 2:
+		asm("movl %%db2, %0" :"=r" (val)); break;
+	case 3:
+		asm("movl %%db3, %0" :"=r" (val)); break;
+	case 6:
+		asm("movl %%db6, %0" :"=r" (val)); break;
+	case 7:
+		asm("movl %%db7, %0" :"=r" (val)); break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static inline void native_set_debugreg(int regno, unsigned long value)
+{
+	switch (regno) {
+	case 0:
+		asm("movl %0,%%db0"	: /* no output */ :"r" (value));
+		break;
+	case 1:
+		asm("movl %0,%%db1"	: /* no output */ :"r" (value));
+		break;
+	case 2:
+		asm("movl %0,%%db2"	: /* no output */ :"r" (value));
+		break;
+	case 3:
+		asm("movl %0,%%db3"	: /* no output */ :"r" (value));
+		break;
+	case 6:
+		asm("movl %0,%%db6"	: /* no output */ :"r" (value));
+		break;
+	case 7:
+		asm("movl %0,%%db7"	: /* no output */ :"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
 
 /*
  * Set IOPL bits in EFLAGS from given mask
  */
-static fastcall inline void native_set_iopl_mask(unsigned mask)
+static inline void native_set_iopl_mask(unsigned mask)
 {
 	unsigned int reg;
 	__asm__ __volatile__ ("pushfl;"
@@ -591,6 +621,28 @@ static fastcall inline void native_set_iopl_mask(unsigned mask)
591 : "i" (~X86_EFLAGS_IOPL), "r" (mask)); 621 : "i" (~X86_EFLAGS_IOPL), "r" (mask));
592} 622}
593 623
624#ifdef CONFIG_PARAVIRT
625#include <asm/paravirt.h>
626#else
627#define paravirt_enabled() 0
628#define __cpuid native_cpuid
629
630static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
631{
632 native_load_esp0(tss, thread);
633}
634
635/*
636 * These special macros can be used to get or set a debugging register
637 */
638#define get_debugreg(var, register) \
639 (var) = native_get_debugreg(register)
640#define set_debugreg(value, register) \
641 native_set_debugreg(register, value)
642
643#define set_iopl_mask native_set_iopl_mask
644#endif /* CONFIG_PARAVIRT */
645
594/* 646/*
595 * Generic CPUID function 647 * Generic CPUID function
596 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx 648 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index a6d20d9a1a3..c3a58c08c49 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -88,65 +88,96 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define savesegment(seg, value) \
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
+
+static inline void native_clts(void)
+{
+	asm volatile ("clts");
+}
+
+static inline unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+	asm volatile("movl %0,%%cr0": :"r" (val));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+	asm volatile("movl %0,%%cr2": :"r" (val));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+	asm volatile("movl %0,%%cr3": :"r" (val));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline unsigned long native_read_cr4_safe(void)
+{
+	unsigned long val;
+	/* This could fault if %cr4 does not exist */
+	asm("1: movl %%cr4, %0		\n"
+	    "2:				\n"
+	    ".section __ex_table,\"a\"	\n"
+	    ".long 1b,2b		\n"
+	    ".previous			\n"
+	    : "=r" (val): "0" (0));
+	return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+	asm volatile("movl %0,%%cr4": :"r" (val));
+}
+
+static inline void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define read_cr0() ({ \
-	unsigned int __dummy; \
-	__asm__ __volatile__( \
-		"movl %%cr0,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr0(x) \
-	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
-
-#define read_cr2() ({ \
-	unsigned int __dummy; \
-	__asm__ __volatile__( \
-		"movl %%cr2,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr2(x) \
-	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
-
-#define read_cr3() ({ \
-	unsigned int __dummy; \
-	__asm__ ( \
-		"movl %%cr3,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr3(x) \
-	__asm__ __volatile__("movl %0,%%cr3": :"r" (x))
-
-#define read_cr4() ({ \
-	unsigned int __dummy; \
-	__asm__( \
-		"movl %%cr4,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define read_cr4_safe() ({ \
-	unsigned int __dummy; \
-	/* This could fault if %cr4 does not exist */ \
-	__asm__("1: movl %%cr4, %0		\n" \
-		"2:				\n" \
-		".section __ex_table,\"a\"	\n" \
-		".long 1b,2b			\n" \
-		".previous			\n" \
-		: "=r" (__dummy): "0" (0)); \
-	__dummy; \
-})
-#define write_cr4(x) \
-	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
-
-#define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory")
+#define read_cr0()	(native_read_cr0())
+#define write_cr0(x)	(native_write_cr0(x))
+#define read_cr2()	(native_read_cr2())
+#define write_cr2(x)	(native_write_cr2(x))
+#define read_cr3()	(native_read_cr3())
+#define write_cr3(x)	(native_write_cr3(x))
+#define read_cr4()	(native_read_cr4())
+#define read_cr4_safe()	(native_read_cr4_safe())
+#define write_cr4(x)	(native_write_cr4(x))
+#define wbinvd()	(native_wbinvd())
 
 /* Clear the 'TS' bit */
-#define clts() __asm__ __volatile__ ("clts")
+#define clts()		(native_clts())
+
 #endif/* CONFIG_PARAVIRT */
 
 /* Set the 'TS' bit */