aboutsummaryrefslogtreecommitdiffstats
path: root/arch/i386/kernel/paravirt.c
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2006-12-06 20:14:07 -0500
committerAndi Kleen <andi@basil.nowhere.org>2006-12-06 20:14:07 -0500
commitd3561b7fa0fb0fc583bab0eeda32bec9e4c4056d (patch)
tree39d835965878622d052ef3b3c7b759d83b6bc327 /arch/i386/kernel/paravirt.c
parentdb91b882aabd0b3b55a87cbfb344f2798bb740b4 (diff)
[PATCH] paravirt: header and stubs for paravirtualisation
Create a paravirt.h header for all the critical operations which need to be replaced with hypervisor calls, and include that instead of defining native operations, when CONFIG_PARAVIRT. This patch does the dumbest possible replacement of paravirtualized instructions: calls through a "paravirt_ops" structure. Currently these are function implementations of native hardware: hypervisors will override the ops structure with their own variants. All the pv-ops functions are declared "fastcall" so that a specific register-based ABI is used, to make inlining assember easier. And: +From: Andy Whitcroft <apw@shadowen.org> The paravirt ops introduce a 'weak' attribute onto memory_setup(). Code ordering leads to the following warnings on x86: arch/i386/kernel/setup.c:651: warning: weak declaration of `memory_setup' after first use results in unspecified behavior Move memory_setup() to avoid this. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Signed-off-by: Andi Kleen <ak@suse.de> Cc: Jeremy Fitzhardinge <jeremy@goop.org> Cc: Zachary Amsden <zach@vmware.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Diffstat (limited to 'arch/i386/kernel/paravirt.c')
-rw-r--r--arch/i386/kernel/paravirt.c404
1 files changed, 404 insertions, 0 deletions
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
new file mode 100644
index 000000000000..478192cd4b90
--- /dev/null
+++ b/arch/i386/kernel/paravirt.c
@@ -0,0 +1,404 @@
1/* Paravirtualization interfaces
2 Copyright (C) 2006 Rusty Russell IBM Corporation
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17*/
18#include <linux/errno.h>
19#include <linux/module.h>
20#include <linux/efi.h>
21#include <linux/bcd.h>
22
23#include <asm/bug.h>
24#include <asm/paravirt.h>
25#include <asm/desc.h>
26#include <asm/setup.h>
27#include <asm/arch_hooks.h>
28#include <asm/time.h>
29#include <asm/irq.h>
30#include <asm/delay.h>
31
/* nop stub: default for paravirt_ops hooks that need no work on bare metal */
static void native_nop(void)
{
}
36
/* Default .banner hook: report which paravirt backend we are booting on. */
static void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       paravirt_ops.name);
}
42
/*
 * Global memory_setup() entry point: forward memory-map setup to the
 * active paravirt backend (native default: machine_specific_memory_setup).
 */
char *memory_setup(void)
{
	return paravirt_ops.memory_setup();
}
47
/*
 * Read x86 debug register %db<regno>.  Only 0-3, 6 and 7 exist;
 * any other regno is a kernel bug and hits BUG().
 */
static fastcall unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0; 	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("movl %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("movl %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("movl %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("movl %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("movl %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("movl %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}
70
/*
 * Write x86 debug register %db<regno>.  Only 0-3, 6 and 7 exist;
 * any other regno is a kernel bug and hits BUG().
 */
static fastcall void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("movl %0,%%db0"	: /* no output */ :"r" (value));
		break;
	case 1:
		asm("movl %0,%%db1"	: /* no output */ :"r" (value));
		break;
	case 2:
		asm("movl %0,%%db2"	: /* no output */ :"r" (value));
		break;
	case 3:
		asm("movl %0,%%db3"	: /* no output */ :"r" (value));
		break;
	case 6:
		asm("movl %0,%%db6"	: /* no output */ :"r" (value));
		break;
	case 7:
		asm("movl %0,%%db7"	: /* no output */ :"r" (value));
		break;
	default:
		BUG();
	}
}
96
/* Global init_IRQ() entry point: forward to the active paravirt backend. */
void init_IRQ(void)
{
	paravirt_ops.init_IRQ();
}
101
/* Execute CLTS: clear the task-switched (TS) flag in CR0. */
static fastcall void native_clts(void)
{
	asm volatile ("clts");
}
106
/* Read control register CR0. */
static fastcall unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
	return val;
}
113
/* Write control register CR0. */
static fastcall void native_write_cr0(unsigned long val)
{
	asm volatile("movl %0,%%cr0": :"r" (val));
}
118
/* Read CR2 (the faulting address after a page fault). */
static fastcall unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
	return val;
}
125
/* Write control register CR2. */
static fastcall void native_write_cr2(unsigned long val)
{
	asm volatile("movl %0,%%cr2": :"r" (val));
}
130
/* Read CR3 (the page-directory base register). */
static fastcall unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
	return val;
}
137
/* Write CR3 (the page-directory base register). */
static fastcall void native_write_cr3(unsigned long val)
{
	asm volatile("movl %0,%%cr3": :"r" (val));
}
142
/* Read control register CR4.  Faults on CPUs without CR4 (see _safe below). */
static fastcall unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
	return val;
}
149
/*
 * Read CR4 with an exception-table fixup: on CPUs where the access
 * faults (no CR4), execution resumes at label 2 and the 0 preloaded
 * into the output register (the "0" (0) input constraint) is returned.
 */
static fastcall unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist */
	asm("1: movl %%cr4, %0		\n"
		"2:				\n"
		".section __ex_table,\"a\"	\n"
		".long 1b,2b			\n"
		".previous			\n"
		: "=r" (val): "0" (0));
	return val;
}
162
/* Write control register CR4. */
static fastcall void native_write_cr4(unsigned long val)
{
	asm volatile("movl %0,%%cr4": :"r" (val));
}
167
/* Return the current EFLAGS (pushfl/popl). */
static fastcall unsigned long native_save_fl(void)
{
	unsigned long f;
	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
	return f;
}
174
/*
 * Restore EFLAGS (pushl/popfl).  "memory" and "cc" clobbers keep the
 * compiler from moving memory accesses across the flags change.
 */
static fastcall void native_restore_fl(unsigned long f)
{
	asm volatile("pushl %0 ; popfl": /* no output */
			     :"g" (f)
			     :"memory", "cc");
}
181
/* Disable interrupts (CLI); "memory" clobber acts as a compiler barrier. */
static fastcall void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
186
/* Enable interrupts (STI); "memory" clobber acts as a compiler barrier. */
static fastcall void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
191
/* Enable interrupts and halt ("sti; hlt" back-to-back so the halt can be
 * woken by an interrupt). */
static fastcall void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
196
/* Halt the CPU without touching the interrupt flag. */
static fastcall void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
201
/* WBINVD: write back and invalidate the CPU caches. */
static fastcall void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
206
/*
 * Read MSR @msr (rdmsr, selector in %ecx, result in %edx:%eax via "=A").
 * On success *err is zeroed; if rdmsr faults, the __ex_table fixup at
 * label 3 stores -EFAULT into *err and the returned value is undefined.
 */
static fastcall unsigned long long native_read_msr(unsigned int msr, int *err)
{
	unsigned long long val;

	asm volatile("2: rdmsr ; xorl %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  movl %3,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
 		     ".section __ex_table,\"a\"\n"
		     "   .align 4\n\t"
		     "   .long 	2b,3b\n\t"
		     ".previous"
		     : "=r" (*err), "=A" (val)
		     : "c" (msr), "i" (-EFAULT));

	return val;
}
225
/*
 * Write MSR @msr with @val (wrmsr; low half in %eax, high half in %edx).
 * Returns 0 on success, or -EFAULT via the __ex_table fixup if wrmsr faults.
 */
static fastcall int native_write_msr(unsigned int msr, unsigned long long val)
{
	int err;
	asm volatile("2: wrmsr ; xorl %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  movl %4,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     ".section __ex_table,\"a\"\n"
		     "   .align 4\n\t"
		     "   .long 	2b,3b\n\t"
		     ".previous"
		     : "=a" (err)
		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
		       "i" (-EFAULT));
	return err;
}
243
/* Read the 64-bit time-stamp counter (rdtsc, result in %edx:%eax). */
static fastcall unsigned long long native_read_tsc(void)
{
	unsigned long long val;
	asm volatile("rdtsc" : "=A" (val));
	return val;
}
250
/*
 * Read a performance-monitoring counter (rdpmc, result in %edx:%eax).
 * NOTE(review): rdpmc selects the counter via %ecx, which this asm never
 * loads or declares as input — presumably callers arrange %ecx, or the
 * pv-ops fastcall ABI delivers the counter number there; confirm at the
 * call sites before relying on which counter is read.
 */
static fastcall unsigned long long native_read_pmc(void)
{
	unsigned long long val;
	asm volatile("rdpmc" : "=A" (val));
	return val;
}
257
/* Load the task register with the TSS selector (GDT_ENTRY_TSS*8). */
static fastcall void native_load_tr_desc(void)
{
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}
262
/* Load the GDT register from *dtr (lgdt). */
static fastcall void native_load_gdt(const struct Xgt_desc_struct *dtr)
{
	asm volatile("lgdt %0"::"m" (*dtr));
}
267
/* Load the IDT register from *dtr (lidt). */
static fastcall void native_load_idt(const struct Xgt_desc_struct *dtr)
{
	asm volatile("lidt %0"::"m" (*dtr));
}
272
/* Store the GDT register into *dtr (sgdt). */
static fastcall void native_store_gdt(struct Xgt_desc_struct *dtr)
{
	asm ("sgdt %0":"=m" (*dtr));
}
277
/* Store the IDT register into *dtr (sidt). */
static fastcall void native_store_idt(struct Xgt_desc_struct *dtr)
{
	asm ("sidt %0":"=m" (*dtr));
}
282
/* Return the current task-register selector (str). */
static fastcall unsigned long native_store_tr(void)
{
	unsigned long tr;
	asm ("str %0":"=r" (tr));
	return tr;
}
289
290static fastcall void native_load_tls(struct thread_struct *t, unsigned int cpu)
291{
292#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
293 C(0); C(1); C(2);
294#undef C
295}
296
297static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
298{
299 u32 *lp = (u32 *)((char *)dt + entry*8);
300 lp[0] = entry_low;
301 lp[1] = entry_high;
302}
303
/* Write an LDT entry: fastcall wrapper around native_write_dt_entry(). */
static fastcall void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
{
	native_write_dt_entry(dt, entrynum, low, high);
}
308
/* Write a GDT entry: fastcall wrapper around native_write_dt_entry(). */
static fastcall void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
{
	native_write_dt_entry(dt, entrynum, low, high);
}
313
/* Write an IDT entry: fastcall wrapper around native_write_dt_entry(). */
static fastcall void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
{
	native_write_dt_entry(dt, entrynum, low, high);
}
318
/*
 * Point the TSS ring-0 stack at the new thread's kernel stack, and keep
 * the SYSENTER CS shadow (tss->ss1) and MSR_IA32_SYSENTER_CS in sync
 * with the thread — the MSR write is skipped when nothing changed.
 */
static fastcall void native_load_esp0(struct tss_struct *tss,
				      struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}
330
/* Classic port-0x80 write used as a short I/O delay. */
static fastcall void native_io_delay(void)
{
	asm volatile("outb %al,$0x80");
}
335
336/* These are in entry.S */
337extern fastcall void native_iret(void);
338extern fastcall void native_irq_enable_sysexit(void);
339
/* Run the backend's banner hook once at boot (core initcall level). */
static int __init print_banner(void)
{
	paravirt_ops.banner();
	return 0;
}
core_initcall(print_banner);
346
/*
 * Default paravirt_ops: calls straight through to native hardware.
 * Hypervisors override this structure with their own variants.  Entries
 * not defined in this file (native_cpuid, native_set_ldt, native_init_IRQ,
 * the wallclock/time hooks, native_set_iopl_mask, __const_udelay, and the
 * entry.S stubs native_iret / native_irq_enable_sysexit) come from
 * elsewhere in the kernel.
 */
struct paravirt_ops paravirt_ops = {
	.name = "bare hardware",
	.paravirt_enabled = 0,
	.kernel_rpl = 0,

	/* boot-time setup */
 	.banner = default_banner,
 	.arch_setup = native_nop,
 	.memory_setup = machine_specific_memory_setup,
	.get_wallclock = native_get_wallclock,
	.set_wallclock = native_set_wallclock,
	.time_init = time_init_hook,
	.init_IRQ = native_init_IRQ,

	/* CPU state accessors */
	.cpuid = native_cpuid,
	.get_debugreg = native_get_debugreg,
	.set_debugreg = native_set_debugreg,
	.clts = native_clts,
	.read_cr0 = native_read_cr0,
	.write_cr0 = native_write_cr0,
	.read_cr2 = native_read_cr2,
	.write_cr2 = native_write_cr2,
	.read_cr3 = native_read_cr3,
	.write_cr3 = native_write_cr3,
	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = native_write_cr4,

	/* interrupt flag and halt */
	.save_fl = native_save_fl,
	.restore_fl = native_restore_fl,
	.irq_disable = native_irq_disable,
	.irq_enable = native_irq_enable,
	.safe_halt = native_safe_halt,
	.halt = native_halt,
	.wbinvd = native_wbinvd,

	/* MSRs and counters */
	.read_msr = native_read_msr,
	.write_msr = native_write_msr,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	/* descriptor tables and segments */
	.load_tr_desc = native_load_tr_desc,
	.set_ldt = native_set_ldt,
	.load_gdt = native_load_gdt,
	.load_idt = native_load_idt,
	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = native_store_tr,
	.load_tls = native_load_tls,
	.write_ldt_entry = native_write_ldt_entry,
	.write_gdt_entry = native_write_gdt_entry,
	.write_idt_entry = native_write_idt_entry,
	.load_esp0 = native_load_esp0,

	.set_iopl_mask = native_set_iopl_mask,
	.io_delay = native_io_delay,
	.const_udelay = __const_udelay,

	/* entry.S stubs */
	.irq_enable_sysexit = native_irq_enable_sysexit,
	.iret = native_iret,
};
EXPORT_SYMBOL(paravirt_ops);