-rw-r--r--  arch/i386/kernel/paravirt.c   | 293
-rw-r--r--  include/asm-i386/desc.h       |  82
-rw-r--r--  include/asm-i386/io.h         |  15
-rw-r--r--  include/asm-i386/irqflags.h   |  61
-rw-r--r--  include/asm-i386/msr.h        | 163
-rw-r--r--  include/asm-i386/paravirt.h   |  17
-rw-r--r--  include/asm-i386/processor.h  |  94
-rw-r--r--  include/asm-i386/system.h     | 139
8 files changed, 389 insertions(+), 475 deletions(-)
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 2ec331e03fa9..47698756aec5 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -93,294 +93,11 @@ static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
 	return insn_len;
 }
 
-static unsigned long native_get_debugreg(int regno)
-{
-	unsigned long val = 0;	/* Damn you, gcc! */
-
-	switch (regno) {
-	case 0:
-		asm("movl %%db0, %0" :"=r" (val)); break;
-	case 1:
-		asm("movl %%db1, %0" :"=r" (val)); break;
-	case 2:
-		asm("movl %%db2, %0" :"=r" (val)); break;
-	case 3:
-		asm("movl %%db3, %0" :"=r" (val)); break;
-	case 6:
-		asm("movl %%db6, %0" :"=r" (val)); break;
-	case 7:
-		asm("movl %%db7, %0" :"=r" (val)); break;
-	default:
-		BUG();
-	}
-	return val;
-}
-
-static void native_set_debugreg(int regno, unsigned long value)
-{
-	switch (regno) {
-	case 0:
-		asm("movl %0,%%db0" : /* no output */ :"r" (value));
-		break;
-	case 1:
-		asm("movl %0,%%db1" : /* no output */ :"r" (value));
-		break;
-	case 2:
-		asm("movl %0,%%db2" : /* no output */ :"r" (value));
-		break;
-	case 3:
-		asm("movl %0,%%db3" : /* no output */ :"r" (value));
-		break;
-	case 6:
-		asm("movl %0,%%db6" : /* no output */ :"r" (value));
-		break;
-	case 7:
-		asm("movl %0,%%db7" : /* no output */ :"r" (value));
-		break;
-	default:
-		BUG();
-	}
-}
-
 void init_IRQ(void)
 {
 	paravirt_ops.init_IRQ();
 }
 
-static void native_clts(void)
-{
-	asm volatile ("clts");
-}
-
-static unsigned long native_read_cr0(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static void native_write_cr0(unsigned long val)
-{
-	asm volatile("movl %0,%%cr0": :"r" (val));
-}
-
-static unsigned long native_read_cr2(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static void native_write_cr2(unsigned long val)
-{
-	asm volatile("movl %0,%%cr2": :"r" (val));
-}
-
-static unsigned long native_read_cr3(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static void native_write_cr3(unsigned long val)
-{
-	asm volatile("movl %0,%%cr3": :"r" (val));
-}
-
-static unsigned long native_read_cr4(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static unsigned long native_read_cr4_safe(void)
-{
-	unsigned long val;
-	/* This could fault if %cr4 does not exist */
-	asm("1: movl %%cr4, %0		\n"
-	    "2:				\n"
-	    ".section __ex_table,\"a\"	\n"
-	    ".long 1b,2b		\n"
-	    ".previous			\n"
-	    : "=r" (val): "0" (0));
-	return val;
-}
-
-static void native_write_cr4(unsigned long val)
-{
-	asm volatile("movl %0,%%cr4": :"r" (val));
-}
-
-static unsigned long native_save_fl(void)
-{
-	unsigned long f;
-	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
-	return f;
-}
-
-static void native_restore_fl(unsigned long f)
-{
-	asm volatile("pushl %0 ; popfl": /* no output */
-		     :"g" (f)
-		     :"memory", "cc");
-}
-
-static void native_irq_disable(void)
-{
-	asm volatile("cli": : :"memory");
-}
-
-static void native_irq_enable(void)
-{
-	asm volatile("sti": : :"memory");
-}
-
-static void native_safe_halt(void)
-{
-	asm volatile("sti; hlt": : :"memory");
-}
-
-static void native_halt(void)
-{
-	asm volatile("hlt": : :"memory");
-}
-
-static void native_wbinvd(void)
-{
-	asm volatile("wbinvd": : :"memory");
-}
-
-static unsigned long long native_read_msr(unsigned int msr, int *err)
-{
-	unsigned long long val;
-
-	asm volatile("2: rdmsr ; xorl %0,%0\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: movl %3,%0 ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     ".section __ex_table,\"a\"\n"
-		     " .align 4\n\t"
-		     " .long 2b,3b\n\t"
-		     ".previous"
-		     : "=r" (*err), "=A" (val)
-		     : "c" (msr), "i" (-EFAULT));
-
-	return val;
-}
-
-static int native_write_msr(unsigned int msr, unsigned long long val)
-{
-	int err;
-	asm volatile("2: wrmsr ; xorl %0,%0\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: movl %4,%0 ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     ".section __ex_table,\"a\"\n"
-		     " .align 4\n\t"
-		     " .long 2b,3b\n\t"
-		     ".previous"
-		     : "=a" (err)
-		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
-		       "i" (-EFAULT));
-	return err;
-}
-
-static unsigned long long native_read_tsc(void)
-{
-	unsigned long long val;
-	asm volatile("rdtsc" : "=A" (val));
-	return val;
-}
-
-static unsigned long long native_read_pmc(void)
-{
-	unsigned long long val;
-	asm volatile("rdpmc" : "=A" (val));
-	return val;
-}
-
-static void native_load_tr_desc(void)
-{
-	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
-}
-
-static void native_load_gdt(const struct Xgt_desc_struct *dtr)
-{
-	asm volatile("lgdt %0"::"m" (*dtr));
-}
-
-static void native_load_idt(const struct Xgt_desc_struct *dtr)
-{
-	asm volatile("lidt %0"::"m" (*dtr));
-}
-
-static void native_store_gdt(struct Xgt_desc_struct *dtr)
-{
-	asm ("sgdt %0":"=m" (*dtr));
-}
-
-static void native_store_idt(struct Xgt_desc_struct *dtr)
-{
-	asm ("sidt %0":"=m" (*dtr));
-}
-
-static unsigned long native_store_tr(void)
-{
-	unsigned long tr;
-	asm ("str %0":"=r" (tr));
-	return tr;
-}
-
-static void native_load_tls(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-	C(0); C(1); C(2);
-#undef C
-}
-
-static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
-{
-	u32 *lp = (u32 *)((char *)dt + entry*8);
-	lp[0] = entry_low;
-	lp[1] = entry_high;
-}
-
-static void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
-{
-	native_write_dt_entry(dt, entrynum, low, high);
-}
-
-static void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
-{
-	native_write_dt_entry(dt, entrynum, low, high);
-}
-
-static void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
-{
-	native_write_dt_entry(dt, entrynum, low, high);
-}
-
-static void native_load_esp0(struct tss_struct *tss,
-			     struct thread_struct *thread)
-{
-	tss->esp0 = thread->esp0;
-
-	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-		tss->ss1 = thread->sysenter_cs;
-		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-	}
-}
-
-static void native_io_delay(void)
-{
-	asm volatile("outb %al,$0x80");
-}
-
 static void native_flush_tlb(void)
 {
 	__native_flush_tlb();
@@ -517,8 +234,8 @@ struct paravirt_ops paravirt_ops = {
 	.safe_halt = native_safe_halt,
 	.halt = native_halt,
 	.wbinvd = native_wbinvd,
-	.read_msr = native_read_msr,
-	.write_msr = native_write_msr,
+	.read_msr = native_read_msr_safe,
+	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
 	.get_scheduled_cycles = native_read_tsc,
@@ -531,9 +248,9 @@ struct paravirt_ops paravirt_ops = {
 	.store_idt = native_store_idt,
 	.store_tr = native_store_tr,
 	.load_tls = native_load_tls,
-	.write_ldt_entry = native_write_ldt_entry,
-	.write_gdt_entry = native_write_gdt_entry,
-	.write_idt_entry = native_write_idt_entry,
+	.write_ldt_entry = write_dt_entry,
+	.write_gdt_entry = write_dt_entry,
+	.write_idt_entry = write_dt_entry,
 	.load_esp0 = native_load_esp0,
 
 	.set_iopl_mask = native_set_iopl_mask,
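
The paravirt.c hunks above delete the native_* helpers from the C file and, as the header hunks below show, move them into the asm-i386 headers as inline functions; paravirt_ops stays behind as a table of function pointers whose defaults point at those natives. Below is a minimal user-space sketch of that dispatch pattern; all names are illustrative, not the kernel's API:

/* Sketch: a struct of function pointers filled with "native" defaults,
 * which a hypervisor port could overwrite at boot. Compile with:
 * cc -o demo demo.c */
#include <stdio.h>

struct demo_ops {
	unsigned long (*read_flags)(void);
	void (*irq_disable)(void);
};

static unsigned long native_read_flags(void)
{
	return 0x202;	/* pretend EFLAGS with IF set */
}

static void native_irq_disable(void)
{
	puts("cli");	/* the real kernel executes the instruction */
}

/* Defaults point at the native versions; a guest patches these. */
static struct demo_ops ops = {
	.read_flags = native_read_flags,
	.irq_disable = native_irq_disable,
};

int main(void)
{
	printf("flags = %#lx\n", ops.read_flags());
	ops.irq_disable();
	return 0;
}
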
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index a75ae6b97860..13f701ea9a8f 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -57,45 +57,33 @@ static inline void pack_gate(__u32 *a, __u32 *b,
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-
-#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
-#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
+#define load_TR_desc() native_load_tr_desc()
+#define load_gdt(dtr) native_load_gdt(dtr)
+#define load_idt(dtr) native_load_idt(dtr)
 #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
 #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
 
-#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
-#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+#define store_gdt(dtr) native_store_gdt(dtr)
+#define store_idt(dtr) native_store_idt(dtr)
+#define store_tr(tr) (tr = native_store_tr())
 #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
 
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-	C(0); C(1); C(2);
-#undef C
-}
+#define load_TLS(t, cpu) native_load_tls(t, cpu)
+#define set_ldt native_set_ldt
 
 #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#endif
 
-static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+static inline void write_dt_entry(struct desc_struct *dt,
+				  int entry, u32 entry_low, u32 entry_high)
 {
-	__u32 *lp = (__u32 *)((char *)dt + entry*8);
-	*lp = entry_a;
-	*(lp+1) = entry_b;
+	dt[entry].a = entry_low;
+	dt[entry].b = entry_high;
 }
 
-#define set_ldt native_set_ldt
-#endif /* CONFIG_PARAVIRT */
-
-static inline fastcall void native_set_ldt(const void *addr,
-					   unsigned int entries)
+static inline void native_set_ldt(const void *addr, unsigned int entries)
 {
 	if (likely(entries == 0))
 		__asm__ __volatile__("lldt %w0"::"q" (0));
@@ -111,6 +99,48 @@ static inline fastcall void native_set_ldt(const void *addr,
 	}
 }
 
+
+static inline void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
+static inline void native_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static inline void native_load_idt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static inline void native_store_gdt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sgdt %0":"=m" (*dtr));
+}
+
+static inline void native_store_idt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sidt %0":"=m" (*dtr));
+}
+
+static inline unsigned long native_store_tr(void)
+{
+	unsigned long tr;
+	asm ("str %0":"=r" (tr));
+	return tr;
+}
+
+static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+	unsigned int i;
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+}
+
 static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
 {
 	__u32 a, b;
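
The desc.h hunks type write_dt_entry() against struct desc_struct, so a descriptor-table slot is addressed as dt[entry].a/.b instead of raw pointer arithmetic on a void *. A user-space sketch of that layout, assuming the kernel's two-word desc_struct (the GDT values below are made up for illustration):

/* Sketch: an 8-byte GDT/LDT/IDT entry is two 32-bit words, which the
 * patch names dt[entry].a and dt[entry].b instead of computing
 * "(u32 *)dt + entry*2" by hand. */
#include <stdint.h>
#include <stdio.h>

struct desc_struct {
	uint32_t a, b;	/* low and high word of one descriptor */
};

static void write_dt_entry(struct desc_struct *dt,
			   int entry, uint32_t lo, uint32_t hi)
{
	dt[entry].a = lo;
	dt[entry].b = hi;
}

int main(void)
{
	struct desc_struct gdt[4] = { { 0, 0 } };

	write_dt_entry(gdt, 2, 0x0000ffff, 0x00cf9a00);
	printf("gdt[2] = %08x %08x\n",
	       (unsigned)gdt[2].b, (unsigned)gdt[2].a);
	return 0;
}
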
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 59fe616933c4..e797586a5bfc 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -250,19 +250,22 @@ static inline void flush_write_buffers(void)
 
 #endif /* __KERNEL__ */
 
+static inline void native_io_delay(void)
+{
+	asm volatile("outb %%al,$0x80" : : : "memory");
+}
+
 #if defined(CONFIG_PARAVIRT)
 #include <asm/paravirt.h>
 #else
 
-#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-
 static inline void slow_down_io(void) {
-	__asm__ __volatile__(
-		__SLOW_DOWN_IO
+	native_io_delay();
 #ifdef REALLY_SLOW_IO
-		__SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+	native_io_delay();
+	native_io_delay();
+	native_io_delay();
 #endif
-		: : );
 }
 
 #endif
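
slow_down_io() now expands to one native_io_delay() call, or four when REALLY_SLOW_IO is defined, replacing the stringified __SLOW_DOWN_IO asm. A compile-time sketch of that pattern; io_delay() here merely prints, where the kernel writes to port 0x80:

/* Sketch of the REALLY_SLOW_IO pattern: one delay by default, four
 * when extra-slow I/O is requested at compile time. */
#include <stdio.h>

#define REALLY_SLOW_IO	/* comment out to get a single delay */

static void io_delay(void)
{
	puts("outb %al,$0x80");
}

static void slow_down_io(void)
{
	io_delay();
#ifdef REALLY_SLOW_IO
	io_delay();
	io_delay();
	io_delay();
#endif
}

int main(void)
{
	slow_down_io();
	return 0;
}
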
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index 17b18cf4fe9d..c1cdd094938e 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -10,6 +10,42 @@
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
 
+#ifndef __ASSEMBLY__
+static inline unsigned long native_save_fl(void)
+{
+	unsigned long f;
+	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
+	return f;
+}
+
+static inline void native_restore_fl(unsigned long f)
+{
+	asm volatile("pushl %0 ; popfl": /* no output */
+		     :"g" (f)
+		     :"memory", "cc");
+}
+
+static inline void native_irq_disable(void)
+{
+	asm volatile("cli": : :"memory");
+}
+
+static inline void native_irq_enable(void)
+{
+	asm volatile("sti": : :"memory");
+}
+
+static inline void native_safe_halt(void)
+{
+	asm volatile("sti; hlt": : :"memory");
+}
+
+static inline void native_halt(void)
+{
+	asm volatile("hlt": : :"memory");
+}
+#endif	/* __ASSEMBLY__ */
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -17,35 +53,22 @@
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-	unsigned long flags;
-
-	__asm__ __volatile__(
-		"pushfl ; popl %0"
-		: "=g" (flags)
-		: /* no input */
-	);
-
-	return flags;
+	return native_save_fl();
 }
 
 static inline void raw_local_irq_restore(unsigned long flags)
 {
-	__asm__ __volatile__(
-		"pushl %0 ; popfl"
-		: /* no output */
-		:"g" (flags)
-		:"memory", "cc"
-	);
+	native_restore_fl(flags);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	__asm__ __volatile__("cli" : : : "memory");
+	native_irq_disable();
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	__asm__ __volatile__("sti" : : : "memory");
+	native_irq_enable();
 }
 
 /*
@@ -54,7 +77,7 @@ static inline void raw_local_irq_enable(void)
  */
 static inline void raw_safe_halt(void)
 {
-	__asm__ __volatile__("sti; hlt" : : : "memory");
+	native_safe_halt();
 }
 
 /*
@@ -63,7 +86,7 @@ static inline void raw_safe_halt(void)
  */
 static inline void halt(void)
 {
-	__asm__ __volatile__("hlt": : :"memory");
+	native_halt();
 }
 
 /*
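
The irqflags.h hunks reduce each raw_* primitive to a one-line call into a native_* inline, which CONFIG_PARAVIRT builds can then route through paravirt_ops. The contract they preserve: restore puts back the saved state rather than unconditionally enabling, so critical sections nest. A user-space sketch with a plain variable standing in for EFLAGS.IF (names illustrative):

/* Sketch of the save/restore contract behind __raw_local_save_flags()
 * and raw_local_irq_restore(). */
#include <stdio.h>

static unsigned long fake_eflags = 1;	/* 1 = interrupts enabled */

static unsigned long native_save_fl(void) { return fake_eflags; }
static void native_restore_fl(unsigned long f) { fake_eflags = f; }
static void native_irq_disable(void) { fake_eflags = 0; }

int main(void)
{
	unsigned long flags = native_save_fl();	/* save */

	native_irq_disable();			/* critical section */
	printf("inside: %lu\n", fake_eflags);
	native_restore_fl(flags);		/* restore, not blindly enable */
	printf("after:  %lu\n", fake_eflags);
	return 0;
}
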
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 2ad3f30b1a68..00acaa8b36bb 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -1,6 +1,74 @@
 #ifndef __ASM_MSR_H
 #define __ASM_MSR_H
 
+#include <asm/errno.h>
+
+static inline unsigned long long native_read_msr(unsigned int msr)
+{
+	unsigned long long val;
+
+	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
+	return val;
+}
+
+static inline unsigned long long native_read_msr_safe(unsigned int msr,
+						      int *err)
+{
+	unsigned long long val;
+
+	asm volatile("2: rdmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: movl %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     " .align 4\n\t"
+		     " .long 2b,3b\n\t"
+		     ".previous"
+		     : "=r" (*err), "=A" (val)
+		     : "c" (msr), "i" (-EFAULT));
+
+	return val;
+}
+
+static inline void native_write_msr(unsigned int msr, unsigned long long val)
+{
+	asm volatile("wrmsr" : : "c" (msr), "A"(val));
+}
+
+static inline int native_write_msr_safe(unsigned int msr,
+					unsigned long long val)
+{
+	int err;
+	asm volatile("2: wrmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: movl %4,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     " .align 4\n\t"
+		     " .long 2b,3b\n\t"
+		     ".previous"
+		     : "=a" (err)
+		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+		       "i" (-EFAULT));
+	return err;
+}
+
+static inline unsigned long long native_read_tsc(void)
+{
+	unsigned long long val;
+	asm volatile("rdtsc" : "=A" (val));
+	return val;
+}
+
+static inline unsigned long long native_read_pmc(void)
+{
+	unsigned long long val;
+	asm volatile("rdpmc" : "=A" (val));
+	return val;
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -11,22 +79,20 @@
  * pointer indirection), this allows gcc to optimize better
  */
 
-#define rdmsr(msr,val1,val2) \
-	__asm__ __volatile__("rdmsr" \
-			     : "=a" (val1), "=d" (val2) \
-			     : "c" (msr))
+#define rdmsr(msr,val1,val2)						\
+	do {								\
+		unsigned long long __val = native_read_msr(msr);	\
+		val1 = __val;						\
+		val2 = __val >> 32;					\
+	} while(0)
 
-#define wrmsr(msr,val1,val2) \
-	__asm__ __volatile__("wrmsr" \
-			     : /* no outputs */ \
-			     : "c" (msr), "a" (val1), "d" (val2))
+#define wrmsr(msr,val1,val2)						\
+	native_write_msr(msr, ((unsigned long long)val2 << 32) | val1)
 
-#define rdmsrl(msr,val) do { \
-	unsigned long l__,h__; \
-	rdmsr (msr, l__, h__); \
-	val = l__; \
-	val |= ((u64)h__<<32); \
-} while(0)
+#define rdmsrl(msr,val)							\
+	do {								\
+		(val) = native_read_msr(msr);				\
+	} while(0)
 
 static inline void wrmsrl (unsigned long msr, unsigned long long val)
 {
@@ -37,50 +103,41 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
 }
 
 /* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__;			\
-	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
-		     "1:\n\t"					\
-		     ".section .fixup,\"ax\"\n\t"		\
-		     "3: movl %4,%0 ; jmp 1b\n\t"		\
-		     ".previous\n\t"				\
-		     ".section __ex_table,\"a\"\n"		\
-		     " .align 4\n\t"				\
-		     " .long 2b,3b\n\t"				\
-		     ".previous"				\
-		     : "=a" (ret__)				\
-		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
-	ret__; })
+#define wrmsr_safe(msr,val1,val2)					\
+	(native_write_msr_safe(msr, ((unsigned long long)val2 << 32) | val1))
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({ int ret__;			\
-	asm volatile("2: rdmsr ; xorl %0,%0\n"			\
-		     "1:\n\t"					\
-		     ".section .fixup,\"ax\"\n\t"		\
-		     "3: movl %4,%0 ; jmp 1b\n\t"		\
-		     ".previous\n\t"				\
-		     ".section __ex_table,\"a\"\n"		\
-		     " .align 4\n\t"				\
-		     " .long 2b,3b\n\t"				\
-		     ".previous"				\
-		     : "=r" (ret__), "=a" (*(a)), "=d" (*(b))	\
-		     : "c" (msr), "i" (-EFAULT));		\
-	ret__; })
-
-#define rdtsc(low,high) \
-	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
-	__asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
-	__asm__ __volatile__("rdtsc" : "=A" (val))
+#define rdmsr_safe(msr,p1,p2)						\
+	({								\
+		int __err;						\
+		unsigned long long __val = native_read_msr_safe(msr, &__err);\
+		(*p1) = __val;						\
+		(*p2) = __val >> 32;					\
+		__err;							\
+	})
+
+#define rdtsc(low,high)							\
+	do {								\
+		u64 _l = native_read_tsc();				\
+		(low) = (u32)_l;					\
+		(high) = _l >> 32;					\
+	} while(0)
+
+#define rdtscl(low)							\
+	do {								\
+		(low) = native_read_tsc();				\
+	} while(0)
+
+#define rdtscll(val) ((val) = native_read_tsc())
 
 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
 
-#define rdpmc(counter,low,high) \
-	__asm__ __volatile__("rdpmc" \
-			     : "=a" (low), "=d" (high) \
-			     : "c" (counter))
+#define rdpmc(counter,low,high)						\
+	do {								\
+		u64 _l = native_read_pmc();				\
+		low = (u32)_l;						\
+		high = _l >> 32;					\
+	} while(0)
 #endif	/* !CONFIG_PARAVIRT */
 
 #ifdef CONFIG_SMP
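
The new rdmsr/wrmsr macros keep the historical two-register (low/high, i.e. EDX:EAX) interface but now route through native_read_msr()/native_write_msr(), splitting and rejoining one 64-bit value. A sketch of just that arithmetic; read_msr() is a stub returning a made-up value:

/* Sketch of the 64-bit split/join used by the new rdmsr/wrmsr macros. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr = 0x00000001000a0b0cULL;

static uint64_t read_msr(unsigned int msr) { (void)msr; return fake_msr; }

int main(void)
{
	uint32_t low, high;
	uint64_t val = read_msr(0x10);

	low = (uint32_t)val;		/* what rdmsr puts in val1 */
	high = (uint32_t)(val >> 32);	/* what rdmsr puts in val2 */
	printf("low=%#x high=%#x\n", (unsigned)low, (unsigned)high);

	/* wrmsr joins them back exactly the same way */
	printf("joined=%#llx\n",
	       (unsigned long long)(((uint64_t)high << 32) | low));
	return 0;
}
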
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index e63f1e444fcf..32acebce9ae2 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -29,6 +29,7 @@ struct thread_struct;
 struct Xgt_desc_struct;
 struct tss_struct;
 struct mm_struct;
+struct desc_struct;
 struct paravirt_ops
 {
 	unsigned int kernel_rpl;
@@ -105,14 +106,13 @@ struct paravirt_ops
 	void (*set_ldt)(const void *desc, unsigned entries);
 	unsigned long (*store_tr)(void);
 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
-	void (*write_ldt_entry)(void *dt, int entrynum,
-				u32 low, u32 high);
-	void (*write_gdt_entry)(void *dt, int entrynum,
-				u32 low, u32 high);
-	void (*write_idt_entry)(void *dt, int entrynum,
-				u32 low, u32 high);
-	void (*load_esp0)(struct tss_struct *tss,
-			  struct thread_struct *thread);
+	void (*write_ldt_entry)(struct desc_struct *,
+				int entrynum, u32 low, u32 high);
+	void (*write_gdt_entry)(struct desc_struct *,
+				int entrynum, u32 low, u32 high);
+	void (*write_idt_entry)(struct desc_struct *,
+				int entrynum, u32 low, u32 high);
+	void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);
 
 	void (*set_iopl_mask)(unsigned mask);
 
@@ -232,6 +232,7 @@ static inline void halt(void)
 
 #define get_kernel_rpl()  (paravirt_ops.kernel_rpl)
 
+/* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr,val1,val2) do {				\
 	int _err;						\
 	u64 _l = paravirt_ops.read_msr(msr,&_err);		\
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 80f7e8a1e878..96edfdfe32d1 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -147,7 +147,7 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
 #define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 
-static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
+static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 					 unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
@@ -545,13 +545,7 @@ static inline void rep_nop(void)
 
 #define cpu_relax()	rep_nop()
 
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define paravirt_enabled() 0
-#define __cpuid native_cpuid
-
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->esp0 = thread->esp0;
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
@@ -561,24 +555,60 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
 	}
 }
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register) \
-		__asm__("movl %%db" #register ", %0" \
-			:"=r" (var))
-#define set_debugreg(value, register) \
-		__asm__("movl %0,%%db" #register \
-			: /* no output */ \
-			:"r" (value))
 
-#define set_iopl_mask native_set_iopl_mask
-#endif /* CONFIG_PARAVIRT */
+static inline unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val = 0;	/* Damn you, gcc! */
+
+	switch (regno) {
+	case 0:
+		asm("movl %%db0, %0" :"=r" (val)); break;
+	case 1:
+		asm("movl %%db1, %0" :"=r" (val)); break;
+	case 2:
+		asm("movl %%db2, %0" :"=r" (val)); break;
+	case 3:
+		asm("movl %%db3, %0" :"=r" (val)); break;
+	case 6:
+		asm("movl %%db6, %0" :"=r" (val)); break;
+	case 7:
+		asm("movl %%db7, %0" :"=r" (val)); break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static inline void native_set_debugreg(int regno, unsigned long value)
+{
+	switch (regno) {
+	case 0:
+		asm("movl %0,%%db0" : /* no output */ :"r" (value));
+		break;
+	case 1:
+		asm("movl %0,%%db1" : /* no output */ :"r" (value));
+		break;
+	case 2:
+		asm("movl %0,%%db2" : /* no output */ :"r" (value));
+		break;
+	case 3:
+		asm("movl %0,%%db3" : /* no output */ :"r" (value));
+		break;
+	case 6:
+		asm("movl %0,%%db6" : /* no output */ :"r" (value));
+		break;
+	case 7:
+		asm("movl %0,%%db7" : /* no output */ :"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
 
 /*
  * Set IOPL bits in EFLAGS from given mask
  */
-static fastcall inline void native_set_iopl_mask(unsigned mask)
+static inline void native_set_iopl_mask(unsigned mask)
 {
 	unsigned int reg;
 	__asm__ __volatile__ ("pushfl;"
@@ -591,6 +621,28 @@ static fastcall inline void native_set_iopl_mask(unsigned mask)
591 : "i" (~X86_EFLAGS_IOPL), "r" (mask)); 621 : "i" (~X86_EFLAGS_IOPL), "r" (mask));
592} 622}
593 623
624#ifdef CONFIG_PARAVIRT
625#include <asm/paravirt.h>
626#else
627#define paravirt_enabled() 0
628#define __cpuid native_cpuid
629
630static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
631{
632 native_load_esp0(tss, thread);
633}
634
635/*
636 * These special macros can be used to get or set a debugging register
637 */
638#define get_debugreg(var, register) \
639 (var) = native_get_debugreg(register)
640#define set_debugreg(value, register) \
641 native_set_debugreg(register, value)
642
643#define set_iopl_mask native_set_iopl_mask
644#endif /* CONFIG_PARAVIRT */
645
594/* 646/*
595 * Generic CPUID function 647 * Generic CPUID function
596 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx 648 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
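
get_debugreg()/set_debugreg() used to paste the register number into the mnemonic with #register, which only works textually at compile time; a paravirt hook needs an ordinary function that takes the register number as a run-time value, hence the switch in native_get_debugreg()/native_set_debugreg() (with inlining and a constant regno, the compiler can still collapse the switch to a single asm). A user-space sketch of the value-based accessor, with an array standing in for %db0..%db7:

/* Sketch of why the debug-register macros became switch functions.
 * The asm is replaced by array reads so this compiles in user space. */
#include <stdio.h>

static unsigned long fake_db[8];	/* stand-in for %db0..%db7 */

static unsigned long native_get_debugreg(int regno)
{
	switch (regno) {
	case 0: case 1: case 2: case 3:
	case 6: case 7:
		return fake_db[regno];	/* kernel: asm("movl %%dbN, %0") */
	default:
		return 0;		/* the kernel does BUG() here */
	}
}

int main(void)
{
	fake_db[7] = 0x400;	/* pretend DR7 has GE set */
	printf("db7 = %#lx\n", native_get_debugreg(7));
	return 0;
}
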
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index a6d20d9a1a30..c3a58c08c495 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -88,65 +88,96 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define savesegment(seg, value) \
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
+
+static inline void native_clts(void)
+{
+	asm volatile ("clts");
+}
+
+static inline unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+	asm volatile("movl %0,%%cr0": :"r" (val));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+	asm volatile("movl %0,%%cr2": :"r" (val));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+	asm volatile("movl %0,%%cr3": :"r" (val));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static inline unsigned long native_read_cr4_safe(void)
+{
+	unsigned long val;
+	/* This could fault if %cr4 does not exist */
+	asm("1: movl %%cr4, %0		\n"
+	    "2:				\n"
+	    ".section __ex_table,\"a\"	\n"
+	    ".long 1b,2b		\n"
+	    ".previous			\n"
+	    : "=r" (val): "0" (0));
+	return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+	asm volatile("movl %0,%%cr4": :"r" (val));
+}
+
+static inline void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define read_cr0() ({ \
-	unsigned int __dummy; \
-	__asm__ __volatile__( \
-		"movl %%cr0,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr0(x) \
-	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
-
-#define read_cr2() ({ \
-	unsigned int __dummy; \
-	__asm__ __volatile__( \
-		"movl %%cr2,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr2(x) \
-	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
-
-#define read_cr3() ({ \
-	unsigned int __dummy; \
-	__asm__ ( \
-		"movl %%cr3,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr3(x) \
-	__asm__ __volatile__("movl %0,%%cr3": :"r" (x))
-
-#define read_cr4() ({ \
-	unsigned int __dummy; \
-	__asm__( \
-		"movl %%cr4,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define read_cr4_safe() ({ \
-	unsigned int __dummy; \
-	/* This could fault if %cr4 does not exist */ \
-	__asm__("1: movl %%cr4, %0		\n" \
-		"2:				\n" \
-		".section __ex_table,\"a\"	\n" \
-		".long 1b,2b			\n" \
-		".previous			\n" \
-		: "=r" (__dummy): "0" (0)); \
-	__dummy; \
-})
-#define write_cr4(x) \
-	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
-
-#define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory")
+#define read_cr0()	(native_read_cr0())
+#define write_cr0(x)	(native_write_cr0(x))
+#define read_cr2()	(native_read_cr2())
+#define write_cr2(x)	(native_write_cr2(x))
+#define read_cr3()	(native_read_cr3())
+#define write_cr3(x)	(native_write_cr3(x))
+#define read_cr4()	(native_read_cr4())
+#define read_cr4_safe()	(native_read_cr4_safe())
+#define write_cr4(x)	(native_write_cr4(x))
+#define wbinvd()	(native_wbinvd())
 
 /* Clear the 'TS' bit */
-#define clts() __asm__ __volatile__ ("clts")
+#define clts()		(native_clts())
+
 #endif/* CONFIG_PARAVIRT */
 
 /* Set the 'TS' bit */
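
native_read_cr4_safe() above, like the *_safe MSR helpers, relies on the kernel's __ex_table mechanism: the address of the possibly-faulting instruction (label 1:) and a fixup address (label 2:) are recorded in a table, so the trap handler can resume past the instruction and leave a default value behind on CPUs without CR4. A rough user-space analogue, using SIGSEGV and siglongjmp in place of the exception table; the bad-pointer read is deliberately invalid to force the fault:

/* Sketch of the "try an instruction, recover at a fixup label" idea.
 * Not how the kernel implements it -- just the same control flow. */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf fixup;

static void on_fault(int sig)
{
	(void)sig;
	siglongjmp(fixup, 1);	/* jump to the "2:" label's analogue */
}

static unsigned long read_maybe_missing(volatile unsigned long *p)
{
	unsigned long val = 0;	/* default if the access faults */

	if (sigsetjmp(fixup, 1) == 0)
		val = *p;	/* "1: movl %%cr4, %0" analogue */
	return val;		/* "2:" -- fixed up or real value */
}

int main(void)
{
	signal(SIGSEGV, on_fault);
	printf("faulting read -> %lu\n",
	       read_maybe_missing((volatile unsigned long *)8));
	return 0;
}
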