Diffstat (limited to 'drivers/lguest')
-rw-r--r--  drivers/lguest/Makefile                |   2
-rw-r--r--  drivers/lguest/core.c                  | 459
-rw-r--r--  drivers/lguest/interrupts_and_traps.c  |  18
-rw-r--r--  drivers/lguest/lg.h                    |  63
-rw-r--r--  drivers/lguest/segments.c              |  26
-rw-r--r--  drivers/lguest/x86/core.c              | 476
-rw-r--r--  drivers/lguest/x86/switcher_32.S       |   3
7 files changed, 525 insertions, 522 deletions
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
index a4567c99991b..d330f5b8c456 100644
--- a/drivers/lguest/Makefile
+++ b/drivers/lguest/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_LGUEST) += lg.o
6 | lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \ | 6 | lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \ |
7 | segments.o io.o lguest_user.o | 7 | segments.o io.o lguest_user.o |
8 | 8 | ||
9 | lg-$(CONFIG_X86_32) += x86/switcher_32.o | 9 | lg-$(CONFIG_X86_32) += x86/switcher_32.o x86/core.o |
10 | 10 | ||
11 | Preparation Preparation!: PREFIX=P | 11 | Preparation Preparation!: PREFIX=P |
12 | Guest: PREFIX=G | 12 | Guest: PREFIX=G |
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index ca581ef591e8..06869a2d3b40 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -11,54 +11,20 @@
11 | #include <linux/vmalloc.h> | 11 | #include <linux/vmalloc.h> |
12 | #include <linux/cpu.h> | 12 | #include <linux/cpu.h> |
13 | #include <linux/freezer.h> | 13 | #include <linux/freezer.h> |
14 | #include <linux/highmem.h> | ||
14 | #include <asm/paravirt.h> | 15 | #include <asm/paravirt.h> |
15 | #include <asm/desc.h> | ||
16 | #include <asm/pgtable.h> | 16 | #include <asm/pgtable.h> |
17 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
18 | #include <asm/poll.h> | 18 | #include <asm/poll.h> |
19 | #include <asm/highmem.h> | ||
20 | #include <asm/asm-offsets.h> | 19 | #include <asm/asm-offsets.h> |
21 | #include <asm/i387.h> | ||
22 | #include "lg.h" | 20 | #include "lg.h" |
23 | 21 | ||
24 | /* Found in switcher.S */ | ||
25 | extern char start_switcher_text[], end_switcher_text[], switch_to_guest[]; | ||
26 | extern unsigned long default_idt_entries[]; | ||
27 | |||
28 | /* Every guest maps the core switcher code. */ | ||
29 | #define SHARED_SWITCHER_PAGES \ | ||
30 | DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE) | ||
31 | /* Pages for switcher itself, then two pages per cpu */ | ||
32 | #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS) | ||
33 | |||
34 | /* We map at -4M for ease of mapping into the guest (one PTE page). */ | ||
35 | #define SWITCHER_ADDR 0xFFC00000 | ||
36 | 22 | ||
37 | static struct vm_struct *switcher_vma; | 23 | static struct vm_struct *switcher_vma; |
38 | static struct page **switcher_page; | 24 | static struct page **switcher_page; |
39 | 25 | ||
40 | static int cpu_had_pge; | ||
41 | static struct { | ||
42 | unsigned long offset; | ||
43 | unsigned short segment; | ||
44 | } lguest_entry; | ||
45 | |||
46 | /* This One Big lock protects all inter-guest data structures. */ | 26 | /* This One Big lock protects all inter-guest data structures. */ |
47 | DEFINE_MUTEX(lguest_lock); | 27 | DEFINE_MUTEX(lguest_lock); |
48 | static DEFINE_PER_CPU(struct lguest *, last_guest); | ||
49 | |||
50 | /* Offset from where switcher.S was compiled to where we've copied it */ | ||
51 | static unsigned long switcher_offset(void) | ||
52 | { | ||
53 | return SWITCHER_ADDR - (unsigned long)start_switcher_text; | ||
54 | } | ||
55 | |||
56 | /* This cpu's struct lguest_pages. */ | ||
57 | static struct lguest_pages *lguest_pages(unsigned int cpu) | ||
58 | { | ||
59 | return &(((struct lguest_pages *) | ||
60 | (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]); | ||
61 | } | ||
62 | 28 | ||
63 | /*H:010 We need to set up the Switcher at a high virtual address. Remember the | 29 | /*H:010 We need to set up the Switcher at a high virtual address. Remember the |
64 | * Switcher is a few hundred bytes of assembler code which actually changes the | 30 | * Switcher is a few hundred bytes of assembler code which actually changes the |
@@ -69,9 +35,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
69 | * Host since it will be running as the switchover occurs. | 35 | * Host since it will be running as the switchover occurs. |
70 | * | 36 | * |
71 | * Trying to map memory at a particular address is an unusual thing to do, so | 37 | * Trying to map memory at a particular address is an unusual thing to do, so |
72 | * it's not a simple one-liner. We also set up the per-cpu parts of the | 38 | * it's not a simple one-liner. */ |
73 | * Switcher here. | ||
74 | */ | ||
75 | static __init int map_switcher(void) | 39 | static __init int map_switcher(void) |
76 | { | 40 | { |
77 | int i, err; | 41 | int i, err; |
@@ -128,90 +92,11 @@ static __init int map_switcher(void)
128 | goto free_vma; | 92 | goto free_vma; |
129 | } | 93 | } |
130 | 94 | ||
131 | /* Now the switcher is mapped at the right address, we can't fail! | 95 | /* Now the Switcher is mapped at the right address, we can't fail! |
132 | * Copy in the compiled-in Switcher code (from switcher.S). */ | 96 | * Copy in the compiled-in Switcher code (from <arch>_switcher.S). */ |
133 | memcpy(switcher_vma->addr, start_switcher_text, | 97 | memcpy(switcher_vma->addr, start_switcher_text, |
134 | end_switcher_text - start_switcher_text); | 98 | end_switcher_text - start_switcher_text); |
135 | 99 | ||
136 | /* Most of the switcher.S doesn't care that it's been moved; on Intel, | ||
137 | * jumps are relative, and it doesn't access any references to external | ||
138 | * code or data. | ||
139 | * | ||
140 | * The only exception is the interrupt handlers in switcher.S: their | ||
141 | * addresses are placed in a table (default_idt_entries), so we need to | ||
142 | * update the table with the new addresses. switcher_offset() is a | ||
143 | * convenience function which returns the distance between the builtin | ||
144 | * switcher code and the high-mapped copy we just made. */ | ||
145 | for (i = 0; i < IDT_ENTRIES; i++) | ||
146 | default_idt_entries[i] += switcher_offset(); | ||
147 | |||
148 | /* | ||
149 | * Set up the Switcher's per-cpu areas. | ||
150 | * | ||
151 | * Each CPU gets two pages of its own within the high-mapped region | ||
152 | * (aka. "struct lguest_pages"). Much of this can be initialized now, | ||
153 | * but some depends on what Guest we are running (which is set up in | ||
154 | * copy_in_guest_info()). | ||
155 | */ | ||
156 | for_each_possible_cpu(i) { | ||
157 | /* lguest_pages() returns this CPU's two pages. */ | ||
158 | struct lguest_pages *pages = lguest_pages(i); | ||
159 | /* This is a convenience pointer to make the code fit one | ||
160 | * statement to a line. */ | ||
161 | struct lguest_ro_state *state = &pages->state; | ||
162 | |||
163 | /* The Global Descriptor Table: the Host has a different one | ||
164 | * for each CPU. We keep a descriptor for the GDT which says | ||
165 | * where it is and how big it is (the size is actually the last | ||
166 | * byte, not the size, hence the "-1"). */ | ||
167 | state->host_gdt_desc.size = GDT_SIZE-1; | ||
168 | state->host_gdt_desc.address = (long)get_cpu_gdt_table(i); | ||
169 | |||
170 | /* All CPUs on the Host use the same Interrupt Descriptor | ||
171 | * Table, so we just use store_idt(), which gets this CPU's IDT | ||
172 | * descriptor. */ | ||
173 | store_idt(&state->host_idt_desc); | ||
174 | |||
175 | /* The descriptors for the Guest's GDT and IDT can be filled | ||
176 | * out now, too. We copy the GDT & IDT into ->guest_gdt and | ||
177 | * ->guest_idt before actually running the Guest. */ | ||
178 | state->guest_idt_desc.size = sizeof(state->guest_idt)-1; | ||
179 | state->guest_idt_desc.address = (long)&state->guest_idt; | ||
180 | state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1; | ||
181 | state->guest_gdt_desc.address = (long)&state->guest_gdt; | ||
182 | |||
183 | /* We know where we want the stack to be when the Guest enters | ||
184 | * the switcher: in pages->regs. The stack grows upwards, so | ||
185 | * we start it at the end of that structure. */ | ||
186 | state->guest_tss.esp0 = (long)(&pages->regs + 1); | ||
187 | /* And this is the GDT entry to use for the stack: we keep a | ||
188 | * couple of special LGUEST entries. */ | ||
189 | state->guest_tss.ss0 = LGUEST_DS; | ||
190 | |||
191 | /* x86 can have a finegrained bitmap which indicates what I/O | ||
192 | * ports the process can use. We set it to the end of our | ||
193 | * structure, meaning "none". */ | ||
194 | state->guest_tss.io_bitmap_base = sizeof(state->guest_tss); | ||
195 | |||
196 | /* Some GDT entries are the same across all Guests, so we can | ||
197 | * set them up now. */ | ||
198 | setup_default_gdt_entries(state); | ||
199 | /* Most IDT entries are the same for all Guests, too.*/ | ||
200 | setup_default_idt_entries(state, default_idt_entries); | ||
201 | |||
202 | /* The Host needs to be able to use the LGUEST segments on this | ||
203 | * CPU, too, so put them in the Host GDT. */ | ||
204 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; | ||
205 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; | ||
206 | } | ||
207 | |||
208 | /* In the Switcher, we want the %cs segment register to use the | ||
209 | * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so | ||
210 | * it will be undisturbed when we switch. To change %cs and jump we | ||
211 | * need this structure to feed to Intel's "lcall" instruction. */ | ||
212 | lguest_entry.offset = (long)switch_to_guest + switcher_offset(); | ||
213 | lguest_entry.segment = LGUEST_CS; | ||
214 | |||
215 | printk(KERN_INFO "lguest: mapped switcher at %p\n", | 100 | printk(KERN_INFO "lguest: mapped switcher at %p\n", |
216 | switcher_vma->addr); | 101 | switcher_vma->addr); |
217 | /* And we succeeded... */ | 102 | /* And we succeeded... */ |
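The per-CPU Switcher setup and IDT relocation deleted above do not vanish; they move into the new architecture-specific x86/core.c. Condensed, the relocation step the deleted loop performed looks like this (relocate_default_idt() is a hypothetical name; the symbols are the ones used above):

static unsigned long switcher_offset(void)
{
	return SWITCHER_ADDR - (unsigned long)start_switcher_text;
}

/* Rebase each compiled-in handler address onto the high-mapped copy. */
static void relocate_default_idt(void)
{
	unsigned int i;

	for (i = 0; i < IDT_ENTRIES; i++)
		default_idt_entries[i] += switcher_offset();
}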
@@ -243,80 +128,6 @@ static void unmap_switcher(void)
243 | __free_pages(switcher_page[i], 0); | 128 | __free_pages(switcher_page[i], 0); |
244 | } | 129 | } |
245 | 130 | ||
246 | /*H:130 Our Guest is usually so well behaved; it never tries to do things it | ||
247 | * isn't allowed to. Unfortunately, Linux's paravirtual infrastructure isn't | ||
248 | * quite complete, because it doesn't contain replacements for the Intel I/O | ||
249 | * instructions. As a result, the Guest sometimes fumbles across one during | ||
250 | * the boot process as it probes for various things which are usually attached | ||
251 | * to a PC. | ||
252 | * | ||
253 | * When the Guest uses one of these instructions, we get trap #13 (General | ||
254 | * Protection Fault) and come here. We see if it's one of those troublesome | ||
255 | * instructions and skip over it. We return true if we did. */ | ||
256 | static int emulate_insn(struct lguest *lg) | ||
257 | { | ||
258 | u8 insn; | ||
259 | unsigned int insnlen = 0, in = 0, shift = 0; | ||
260 | /* The eip contains the *virtual* address of the Guest's instruction: | ||
261 | * guest_pa just subtracts the Guest's page_offset. */ | ||
262 | unsigned long physaddr = guest_pa(lg, lg->regs->eip); | ||
263 | |||
264 | /* The guest_pa() function only works for Guest kernel addresses, but | ||
265 | * that's all we're trying to do anyway. */ | ||
266 | if (lg->regs->eip < lg->page_offset) | ||
267 | return 0; | ||
268 | |||
269 | /* Decoding x86 instructions is icky. */ | ||
270 | lgread(lg, &insn, physaddr, 1); | ||
271 | |||
272 | /* 0x66 is an "operand prefix". It means it's using the upper 16 bits | ||
273 | of the eax register. */ | ||
274 | if (insn == 0x66) { | ||
275 | shift = 16; | ||
276 | /* The instruction is 1 byte so far, read the next byte. */ | ||
277 | insnlen = 1; | ||
278 | lgread(lg, &insn, physaddr + insnlen, 1); | ||
279 | } | ||
280 | |||
281 | /* We can ignore the lower bit for the moment and decode the 4 opcodes | ||
282 | * we need to emulate. */ | ||
283 | switch (insn & 0xFE) { | ||
284 | case 0xE4: /* in <next byte>,%al */ | ||
285 | insnlen += 2; | ||
286 | in = 1; | ||
287 | break; | ||
288 | case 0xEC: /* in (%dx),%al */ | ||
289 | insnlen += 1; | ||
290 | in = 1; | ||
291 | break; | ||
292 | case 0xE6: /* out %al,<next byte> */ | ||
293 | insnlen += 2; | ||
294 | break; | ||
295 | case 0xEE: /* out %al,(%dx) */ | ||
296 | insnlen += 1; | ||
297 | break; | ||
298 | default: | ||
299 | /* OK, we don't know what this is, can't emulate. */ | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | /* If it was an "IN" instruction, they expect the result to be read | ||
304 | * into %eax, so we change %eax. We always return all-ones, which | ||
305 | * traditionally means "there's nothing there". */ | ||
306 | if (in) { | ||
307 | /* Lower bit tells us whether it's a 16 or 32 bit access */ | ||
308 | if (insn & 0x1) | ||
309 | lg->regs->eax = 0xFFFFFFFF; | ||
310 | else | ||
311 | lg->regs->eax |= (0xFFFF << shift); | ||
312 | } | ||
313 | /* Finally, we've "done" the instruction, so move past it. */ | ||
314 | lg->regs->eip += insnlen; | ||
315 | /* Success! */ | ||
316 | return 1; | ||
317 | } | ||
318 | /*:*/ | ||
319 | |||
320 | /*L:305 | 131 | /*L:305 |
321 | * Dealing With Guest Memory. | 132 | * Dealing With Guest Memory. |
322 | * | 133 | * |
@@ -380,104 +191,6 @@ void lgwrite(struct lguest *lg, unsigned long addr, const void *b,
380 | } | 191 | } |
381 | /* (end of memory access helper routines) :*/ | 192 | /* (end of memory access helper routines) :*/ |
382 | 193 | ||
383 | static void set_ts(void) | ||
384 | { | ||
385 | u32 cr0; | ||
386 | |||
387 | cr0 = read_cr0(); | ||
388 | if (!(cr0 & 8)) | ||
389 | write_cr0(cr0|8); | ||
390 | } | ||
391 | |||
392 | /*S:010 | ||
393 | * We are getting close to the Switcher. | ||
394 | * | ||
395 | * Remember that each CPU has two pages which are visible to the Guest when it | ||
396 | * runs on that CPU. This has to contain the state for that Guest: we copy the | ||
397 | * state in just before we run the Guest. | ||
398 | * | ||
399 | * Each Guest has "changed" flags which indicate what has changed in the Guest | ||
400 | * since it last ran. We saw this set in interrupts_and_traps.c and | ||
401 | * segments.c. | ||
402 | */ | ||
403 | static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages) | ||
404 | { | ||
405 | /* Copying all this data can be quite expensive. We usually run the | ||
406 | * same Guest we ran last time (and that Guest hasn't run anywhere else | ||
407 | * meanwhile). If that's not the case, we pretend everything in the | ||
408 | * Guest has changed. */ | ||
409 | if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) { | ||
410 | __get_cpu_var(last_guest) = lg; | ||
411 | lg->last_pages = pages; | ||
412 | lg->changed = CHANGED_ALL; | ||
413 | } | ||
414 | |||
415 | /* These copies are pretty cheap, so we do them unconditionally: */ | ||
416 | /* Save the current Host top-level page directory. */ | ||
417 | pages->state.host_cr3 = __pa(current->mm->pgd); | ||
418 | /* Set up the Guest's page tables to see this CPU's pages (and no | ||
419 | * other CPU's pages). */ | ||
420 | map_switcher_in_guest(lg, pages); | ||
421 | /* Set up the two "TSS" members which tell the CPU what stack to use | ||
422 | * for traps which go directly into the Guest (ie. traps at privilege | ||
423 | * level 1). */ | ||
424 | pages->state.guest_tss.esp1 = lg->esp1; | ||
425 | pages->state.guest_tss.ss1 = lg->ss1; | ||
426 | |||
427 | /* Copy direct-to-Guest trap entries. */ | ||
428 | if (lg->changed & CHANGED_IDT) | ||
429 | copy_traps(lg, pages->state.guest_idt, default_idt_entries); | ||
430 | |||
431 | /* Copy all GDT entries which the Guest can change. */ | ||
432 | if (lg->changed & CHANGED_GDT) | ||
433 | copy_gdt(lg, pages->state.guest_gdt); | ||
434 | /* If only the TLS entries have changed, copy them. */ | ||
435 | else if (lg->changed & CHANGED_GDT_TLS) | ||
436 | copy_gdt_tls(lg, pages->state.guest_gdt); | ||
437 | |||
438 | /* Mark the Guest as unchanged for next time. */ | ||
439 | lg->changed = 0; | ||
440 | } | ||
441 | |||
442 | /* Finally: the code to actually call into the Switcher to run the Guest. */ | ||
443 | static void run_guest_once(struct lguest *lg, struct lguest_pages *pages) | ||
444 | { | ||
445 | /* This is a dummy value we need for GCC's sake. */ | ||
446 | unsigned int clobber; | ||
447 | |||
448 | /* Copy the guest-specific information into this CPU's "struct | ||
449 | * lguest_pages". */ | ||
450 | copy_in_guest_info(lg, pages); | ||
451 | |||
452 | /* Set the trap number to 256 (impossible value). If we fault while | ||
453 | * switching to the Guest (bad segment registers or bug), this will | ||
454 | * cause us to abort the Guest. */ | ||
455 | lg->regs->trapnum = 256; | ||
456 | |||
457 | /* Now: we push the "eflags" register on the stack, then do an "lcall". | ||
458 | * This is how we change from using the kernel code segment to using | ||
459 | * the dedicated lguest code segment, as well as jumping into the | ||
460 | * Switcher. | ||
461 | * | ||
462 | * The lcall also pushes the old code segment (KERNEL_CS) onto the | ||
463 | * stack, then the address of this call. This stack layout happens to | ||
464 | * exactly match the stack of an interrupt... */ | ||
465 | asm volatile("pushf; lcall *lguest_entry" | ||
466 | /* This is how we tell GCC that %eax ("a") and %ebx ("b") | ||
467 | * are changed by this routine. The "=" means output. */ | ||
468 | : "=a"(clobber), "=b"(clobber) | ||
469 | /* %eax contains the pages pointer. ("0" refers to the | ||
470 | * 0-th argument above, ie "a"). %ebx contains the | ||
471 | * physical address of the Guest's top-level page | ||
472 | * directory. */ | ||
473 | : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir)) | ||
474 | /* We tell gcc that all these registers could change, | ||
475 | * which means we don't have to save and restore them in | ||
476 | * the Switcher. */ | ||
477 | : "memory", "%edx", "%ecx", "%edi", "%esi"); | ||
478 | } | ||
479 | /*:*/ | ||
480 | |||
481 | /*H:030 Let's jump straight to the main loop which runs the Guest. | 194 | /*H:030 Let's jump straight to the main loop which runs the Guest. |
482 | * Remember, this is called by the Launcher reading /dev/lguest, and we keep | 195 | * Remember, this is called by the Launcher reading /dev/lguest, and we keep |
483 | * going around and around until something interesting happens. */ | 196 | * going around and around until something interesting happens. */ |
@@ -485,11 +198,6 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
485 | { | 198 | { |
486 | /* We stop running once the Guest is dead. */ | 199 | /* We stop running once the Guest is dead. */ |
487 | while (!lg->dead) { | 200 | while (!lg->dead) { |
488 | /* We need to initialize this, otherwise gcc complains. It's | ||
489 | * not (yet) clever enough to see that it's initialized when we | ||
490 | * need it. */ | ||
491 | unsigned int cr2 = 0; /* Damn gcc */ | ||
492 | |||
493 | /* First we run any hypercalls the Guest wants done: either in | 201 | /* First we run any hypercalls the Guest wants done: either in |
494 | * the hypercall ring in "struct lguest_data", or directly by | 202 | * the hypercall ring in "struct lguest_data", or directly by |
495 | * using int 31 (LGUEST_TRAP_ENTRY). */ | 203 | * using int 31 (LGUEST_TRAP_ENTRY). */ |
@@ -538,132 +246,20 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
538 | * the "Do Not Disturb" sign: */ | 246 | * the "Do Not Disturb" sign: */ |
539 | local_irq_disable(); | 247 | local_irq_disable(); |
540 | 248 | ||
541 | /* Remember the awfully-named TS bit? If the Guest has asked | 249 | /* Actually run the Guest until something happens. */ |
542 | * to set it we set it now, so we can trap and pass that trap | 250 | lguest_arch_run_guest(lg); |
543 | * to the Guest if it uses the FPU. */ | ||
544 | if (lg->ts) | ||
545 | set_ts(); | ||
546 | |||
547 | /* SYSENTER is an optimized way of doing system calls. We | ||
548 | * can't allow it because it always jumps to privilege level 0. | ||
549 | * A normal Guest won't try it because we don't advertise it in | ||
550 | * CPUID, but a malicious Guest (or malicious Guest userspace | ||
551 | * program) could, so we tell the CPU to disable it before | ||
552 | * running the Guest. */ | ||
553 | if (boot_cpu_has(X86_FEATURE_SEP)) | ||
554 | wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); | ||
555 | |||
556 | /* Now we actually run the Guest. It will pop back out when | ||
557 | * something interesting happens, and we can examine its | ||
558 | * registers to see what it was doing. */ | ||
559 | run_guest_once(lg, lguest_pages(raw_smp_processor_id())); | ||
560 | |||
561 | /* The "regs" pointer contains two extra entries which are not | ||
562 | * really registers: a trap number which says what interrupt or | ||
563 | * trap made the switcher code come back, and an error code | ||
564 | * which some traps set. */ | ||
565 | |||
566 | /* If the Guest page faulted, then the cr2 register will tell | ||
567 | * us the bad virtual address. We have to grab this now, | ||
568 | * because once we re-enable interrupts an interrupt could | ||
569 | * fault and thus overwrite cr2, or we could even move off to a | ||
570 | * different CPU. */ | ||
571 | if (lg->regs->trapnum == 14) | ||
572 | cr2 = read_cr2(); | ||
573 | /* Similarly, if we took a trap because the Guest used the FPU, | ||
574 | * we have to restore the FPU it expects to see. */ | ||
575 | else if (lg->regs->trapnum == 7) | ||
576 | math_state_restore(); | ||
577 | |||
578 | /* Restore SYSENTER if it's supposed to be on. */ | ||
579 | if (boot_cpu_has(X86_FEATURE_SEP)) | ||
580 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); | ||
581 | 251 | ||
582 | /* Now we're ready to be interrupted or moved to other CPUs */ | 252 | /* Now we're ready to be interrupted or moved to other CPUs */ |
583 | local_irq_enable(); | 253 | local_irq_enable(); |
584 | 254 | ||
585 | /* OK, so what happened? */ | 255 | /* Now we deal with whatever happened to the Guest. */ |
586 | switch (lg->regs->trapnum) { | 256 | lguest_arch_handle_trap(lg); |
587 | case 13: /* We've intercepted a GPF. */ | ||
588 | /* Check if this was one of those annoying IN or OUT | ||
589 | * instructions which we need to emulate. If so, we | ||
590 | * just go back into the Guest after we've done it. */ | ||
591 | if (lg->regs->errcode == 0) { | ||
592 | if (emulate_insn(lg)) | ||
593 | continue; | ||
594 | } | ||
595 | break; | ||
596 | case 14: /* We've intercepted a page fault. */ | ||
597 | /* The Guest accessed a virtual address that wasn't | ||
598 | * mapped. This happens a lot: we don't actually set | ||
599 | * up most of the page tables for the Guest at all when | ||
600 | * we start: as it runs it asks for more and more, and | ||
601 | * we set them up as required. In this case, we don't | ||
602 | * even tell the Guest that the fault happened. | ||
603 | * | ||
604 | * The errcode tells whether this was a read or a | ||
605 | * write, and whether kernel or userspace code. */ | ||
606 | if (demand_page(lg, cr2, lg->regs->errcode)) | ||
607 | continue; | ||
608 | |||
609 | /* OK, it's really not there (or not OK): the Guest | ||
610 | * needs to know. We write out the cr2 value so it | ||
611 | * knows where the fault occurred. | ||
612 | * | ||
613 | * Note that if the Guest were really messed up, this | ||
614 | * could happen before it's done the INITIALIZE | ||
615 | * hypercall, so lg->lguest_data will be NULL */ | ||
616 | if (lg->lguest_data | ||
617 | && put_user(cr2, &lg->lguest_data->cr2)) | ||
618 | kill_guest(lg, "Writing cr2"); | ||
619 | break; | ||
620 | case 7: /* We've intercepted a Device Not Available fault. */ | ||
621 | /* If the Guest doesn't want to know, we already | ||
622 | * restored the Floating Point Unit, so we just | ||
623 | * continue without telling it. */ | ||
624 | if (!lg->ts) | ||
625 | continue; | ||
626 | break; | ||
627 | case 32 ... 255: | ||
628 | /* These values mean a real interrupt occurred, in | ||
629 | * which case the Host handler has already been run. | ||
630 | * We just do a friendly check if another process | ||
631 | * should now be run, then fall through to loop | ||
632 | * around: */ | ||
633 | cond_resched(); | ||
634 | case LGUEST_TRAP_ENTRY: /* Handled at top of loop */ | ||
635 | continue; | ||
636 | } | ||
637 | |||
638 | /* If we get here, it's a trap the Guest wants to know | ||
639 | * about. */ | ||
640 | if (deliver_trap(lg, lg->regs->trapnum)) | ||
641 | continue; | ||
642 | |||
643 | /* If the Guest doesn't have a handler (either it hasn't | ||
644 | * registered any yet, or it's one of the faults we don't let | ||
645 | * it handle), it dies with a cryptic error message. */ | ||
646 | kill_guest(lg, "unhandled trap %li at %#lx (%#lx)", | ||
647 | lg->regs->trapnum, lg->regs->eip, | ||
648 | lg->regs->trapnum == 14 ? cr2 : lg->regs->errcode); | ||
649 | } | 257 | } |
258 | |||
650 | /* The Guest is dead => "No such file or directory" */ | 259 | /* The Guest is dead => "No such file or directory" */ |
651 | return -ENOENT; | 260 | return -ENOENT; |
652 | } | 261 | } |
653 | 262 | ||
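With the x86 details pushed behind two hooks, the body of run_guest() reduces to an architecture-neutral loop. Condensed from the right-hand column above (hypercall, interrupt and signal handling elided):

int run_guest(struct lguest *lg, unsigned long __user *user)
{
	/* We stop running once the Guest is dead. */
	while (!lg->dead) {
		/* ... hypercalls, pending interrupts, signals ... */

		/* Don't let interrupts disturb the switch. */
		local_irq_disable();

		/* Actually run the Guest until something happens. */
		lguest_arch_run_guest(lg);

		local_irq_enable();

		/* Now we deal with whatever happened to the Guest. */
		lguest_arch_handle_trap(lg);
	}

	/* The Guest is dead => "No such file or directory" */
	return -ENOENT;
}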
654 | /* Now we can look at each of the routines this calls, in increasing order of | ||
655 | * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), | ||
656 | * deliver_trap() and demand_page(). After all those, we'll be ready to | ||
657 | * examine the Switcher, and our philosophical understanding of the Host/Guest | ||
658 | * duality will be complete. :*/ | ||
659 | static void adjust_pge(void *on) | ||
660 | { | ||
661 | if (on) | ||
662 | write_cr4(read_cr4() | X86_CR4_PGE); | ||
663 | else | ||
664 | write_cr4(read_cr4() & ~X86_CR4_PGE); | ||
665 | } | ||
666 | |||
667 | /*H:000 | 263 | /*H:000 |
668 | * Welcome to the Host! | 264 | * Welcome to the Host! |
669 | * | 265 | * |
@@ -705,31 +301,8 @@ static int __init init(void)
705 | return err; | 301 | return err; |
706 | } | 302 | } |
707 | 303 | ||
708 | /* Finally, we need to turn off "Page Global Enable". PGE is an | 304 | /* Finally we do some architecture-specific setup. */ |
709 | * optimization where page table entries are specially marked to show | 305 | lguest_arch_host_init(); |
710 | * they never change. The Host kernel marks all the kernel pages this | ||
711 | * way because it's always present, even when userspace is running. | ||
712 | * | ||
713 | * Lguest breaks this: unbeknownst to the rest of the Host kernel, we | ||
714 | * switch to the Guest kernel. If you don't disable this on all CPUs, | ||
715 | * you'll get really weird bugs that you'll chase for two days. | ||
716 | * | ||
717 | * I used to turn PGE off every time we switched to the Guest and back | ||
718 | * on when we return, but that slowed the Switcher down noticeably. */ | ||
719 | |||
720 | /* We don't need the complexity of CPUs coming and going while we're | ||
721 | * doing this. */ | ||
722 | lock_cpu_hotplug(); | ||
723 | if (cpu_has_pge) { /* We have a broader idea of "global". */ | ||
724 | /* Remember that this was originally set (for cleanup). */ | ||
725 | cpu_had_pge = 1; | ||
726 | /* adjust_pge is a helper function which sets or unsets the PGE | ||
727 | * bit on its CPU, depending on the argument (0 == unset). */ | ||
728 | on_each_cpu(adjust_pge, (void *)0, 0, 1); | ||
729 | /* Turn off the feature in the global feature set. */ | ||
730 | clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); | ||
731 | } | ||
732 | unlock_cpu_hotplug(); | ||
733 | 306 | ||
734 | /* All good! */ | 307 | /* All good! */ |
735 | return 0; | 308 | return 0; |
@@ -742,15 +315,9 @@ static void __exit fini(void)
742 | free_pagetables(); | 315 | free_pagetables(); |
743 | unmap_switcher(); | 316 | unmap_switcher(); |
744 | 317 | ||
745 | /* If we had PGE before we started, turn it back on now. */ | 318 | lguest_arch_host_fini(); |
746 | lock_cpu_hotplug(); | ||
747 | if (cpu_had_pge) { | ||
748 | set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); | ||
749 | /* adjust_pge's argument "1" means set PGE. */ | ||
750 | on_each_cpu(adjust_pge, (void *)1, 0, 1); | ||
751 | } | ||
752 | unlock_cpu_hotplug(); | ||
753 | } | 319 | } |
320 | /*:*/ | ||
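The same split gives module setup and teardown a matching pair of arch hooks; stripped of the generic work, init() and fini() end up symmetric (a condensed sketch, with the generic calls elided into comments):

static int __init init(void)
{
	/* ... map_switcher(), init_pagetables(), lguest_device_init() ... */

	/* Finally we do some architecture-specific setup. */
	lguest_arch_host_init();
	return 0;
}

static void __exit fini(void)
{
	/* ... free_pagetables(), unmap_switcher() ... */

	lguest_arch_host_fini();
}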
754 | 321 | ||
755 | /* The Host side of lguest can be a module. This is a nice way for people to | 322 | /* The Host side of lguest can be a module. This is a nice way for people to |
756 | * play with it. */ | 323 | * play with it. */ |
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 0dfb0903aa69..fdefc0afc38c 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -165,7 +165,7 @@ void maybe_do_interrupt(struct lguest *lg)
165 | /* Look at the IDT entry the Guest gave us for this interrupt. The | 165 | /* Look at the IDT entry the Guest gave us for this interrupt. The |
166 | * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip | 166 | * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip |
167 | * over them. */ | 167 | * over them. */ |
168 | idt = &lg->idt[FIRST_EXTERNAL_VECTOR+irq]; | 168 | idt = &lg->arch.idt[FIRST_EXTERNAL_VECTOR+irq]; |
169 | /* If they don't have a handler (yet?), we just ignore it */ | 169 | /* If they don't have a handler (yet?), we just ignore it */ |
170 | if (idt_present(idt->a, idt->b)) { | 170 | if (idt_present(idt->a, idt->b)) { |
171 | /* OK, mark it no longer pending and deliver it. */ | 171 | /* OK, mark it no longer pending and deliver it. */ |
@@ -197,14 +197,14 @@ int deliver_trap(struct lguest *lg, unsigned int num)
197 | { | 197 | { |
198 | /* Trap numbers are always 8 bit, but we set an impossible trap number | 198 | /* Trap numbers are always 8 bit, but we set an impossible trap number |
199 | * for traps inside the Switcher, so check that here. */ | 199 | * for traps inside the Switcher, so check that here. */ |
200 | if (num >= ARRAY_SIZE(lg->idt)) | 200 | if (num >= ARRAY_SIZE(lg->arch.idt)) |
201 | return 0; | 201 | return 0; |
202 | 202 | ||
203 | /* Early on the Guest hasn't set the IDT entries (or maybe it put a | 203 | /* Early on the Guest hasn't set the IDT entries (or maybe it put a |
204 | * bogus one in): if we fail here, the Guest will be killed. */ | 204 | * bogus one in): if we fail here, the Guest will be killed. */ |
205 | if (!idt_present(lg->idt[num].a, lg->idt[num].b)) | 205 | if (!idt_present(lg->arch.idt[num].a, lg->arch.idt[num].b)) |
206 | return 0; | 206 | return 0; |
207 | set_guest_interrupt(lg, lg->idt[num].a, lg->idt[num].b, has_err(num)); | 207 | set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b, has_err(num)); |
208 | return 1; | 208 | return 1; |
209 | } | 209 | } |
210 | 210 | ||
@@ -341,10 +341,10 @@ void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
341 | lg->changed |= CHANGED_IDT; | 341 | lg->changed |= CHANGED_IDT; |
342 | 342 | ||
343 | /* Check that the Guest doesn't try to step outside the bounds. */ | 343 | /* Check that the Guest doesn't try to step outside the bounds. */ |
344 | if (num >= ARRAY_SIZE(lg->idt)) | 344 | if (num >= ARRAY_SIZE(lg->arch.idt)) |
345 | kill_guest(lg, "Setting idt entry %u", num); | 345 | kill_guest(lg, "Setting idt entry %u", num); |
346 | else | 346 | else |
347 | set_trap(lg, &lg->idt[num], num, lo, hi); | 347 | set_trap(lg, &lg->arch.idt[num], num, lo, hi); |
348 | } | 348 | } |
349 | 349 | ||
350 | /* The default entry for each interrupt points into the Switcher routines which | 350 | /* The default entry for each interrupt points into the Switcher routines which |
@@ -387,7 +387,7 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt,
387 | 387 | ||
388 | /* We can simply copy the direct traps, otherwise we use the default | 388 | /* We can simply copy the direct traps, otherwise we use the default |
389 | * ones in the Switcher: they will return to the Host. */ | 389 | * ones in the Switcher: they will return to the Host. */ |
390 | for (i = 0; i < ARRAY_SIZE(lg->idt); i++) { | 390 | for (i = 0; i < ARRAY_SIZE(lg->arch.idt); i++) { |
391 | /* If no Guest can ever override this trap, leave it alone. */ | 391 | /* If no Guest can ever override this trap, leave it alone. */ |
392 | if (!direct_trap(i)) | 392 | if (!direct_trap(i)) |
393 | continue; | 393 | continue; |
@@ -396,8 +396,8 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt,
396 | * Interrupt gates (type 14) disable interrupts as they are | 396 | * Interrupt gates (type 14) disable interrupts as they are |
397 | * entered, which we never let the Guest do. Not present | 397 | * entered, which we never let the Guest do. Not present |
398 | * entries (type 0x0) also can't go direct, of course. */ | 398 | * entries (type 0x0) also can't go direct, of course. */ |
399 | if (idt_type(lg->idt[i].a, lg->idt[i].b) == 0xF) | 399 | if (idt_type(lg->arch.idt[i].a, lg->arch.idt[i].b) == 0xF) |
400 | idt[i] = lg->idt[i]; | 400 | idt[i] = lg->arch.idt[i]; |
401 | else | 401 | else |
402 | /* Reset it to the default. */ | 402 | /* Reset it to the default. */ |
403 | default_idt_entry(&idt[i], i, def[i]); | 403 | default_idt_entry(&idt[i], i, def[i]); |
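The 0xF test above keys off the type nibble of an IDT descriptor: 0xE marks a 32-bit interrupt gate (the CPU clears IF on entry), 0xF a 32-bit trap gate (IF is left alone), and 0x0 a not-present entry. The comparison presumably relies on a small accessor along these lines (an illustrative sketch, not necessarily the file's real idt_type()):

/* The gate type lives in bits 8-11 of the descriptor's high word. */
static int gate_type(u32 lo, u32 hi)
{
	return (hi >> 8) & 0xF;	/* 0xE = interrupt gate, 0xF = trap gate */
}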
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index c1ca127ddece..203d3100c3b4 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -1,13 +1,6 @@
1 | #ifndef _LGUEST_H | 1 | #ifndef _LGUEST_H |
2 | #define _LGUEST_H | 2 | #define _LGUEST_H |
3 | 3 | ||
4 | #include <asm/desc.h> | ||
5 | |||
6 | #define GDT_ENTRY_LGUEST_CS 10 | ||
7 | #define GDT_ENTRY_LGUEST_DS 11 | ||
8 | #define LGUEST_CS (GDT_ENTRY_LGUEST_CS * 8) | ||
9 | #define LGUEST_DS (GDT_ENTRY_LGUEST_DS * 8) | ||
10 | |||
11 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
12 | #include <linux/types.h> | 5 | #include <linux/types.h> |
13 | #include <linux/init.h> | 6 | #include <linux/init.h> |
@@ -18,34 +11,12 @@
18 | #include <linux/wait.h> | 11 | #include <linux/wait.h> |
19 | #include <linux/err.h> | 12 | #include <linux/err.h> |
20 | #include <asm/semaphore.h> | 13 | #include <asm/semaphore.h> |
21 | #include "irq_vectors.h" | ||
22 | |||
23 | #define GUEST_PL 1 | ||
24 | 14 | ||
25 | struct lguest_regs | 15 | #include <asm/lguest.h> |
26 | { | ||
27 | /* Manually saved part. */ | ||
28 | unsigned long ebx, ecx, edx; | ||
29 | unsigned long esi, edi, ebp; | ||
30 | unsigned long gs; | ||
31 | unsigned long eax; | ||
32 | unsigned long fs, ds, es; | ||
33 | unsigned long trapnum, errcode; | ||
34 | /* Trap pushed part */ | ||
35 | unsigned long eip; | ||
36 | unsigned long cs; | ||
37 | unsigned long eflags; | ||
38 | unsigned long esp; | ||
39 | unsigned long ss; | ||
40 | }; | ||
41 | 16 | ||
42 | void free_pagetables(void); | 17 | void free_pagetables(void); |
43 | int init_pagetables(struct page **switcher_page, unsigned int pages); | 18 | int init_pagetables(struct page **switcher_page, unsigned int pages); |
44 | 19 | ||
45 | /* Full 4G segment descriptors, suitable for CS and DS. */ | ||
46 | #define FULL_EXEC_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9b00}) | ||
47 | #define FULL_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9300}) | ||
48 | |||
49 | struct lguest_dma_info | 20 | struct lguest_dma_info |
50 | { | 21 | { |
51 | struct list_head list; | 22 | struct list_head list; |
@@ -98,23 +69,6 @@ struct pgdir
98 | spgd_t *pgdir; | 69 | spgd_t *pgdir; |
99 | }; | 70 | }; |
100 | 71 | ||
101 | /* This is a guest-specific page (mapped ro) into the guest. */ | ||
102 | struct lguest_ro_state | ||
103 | { | ||
104 | /* Host information we need to restore when we switch back. */ | ||
105 | u32 host_cr3; | ||
106 | struct Xgt_desc_struct host_idt_desc; | ||
107 | struct Xgt_desc_struct host_gdt_desc; | ||
108 | u32 host_sp; | ||
109 | |||
110 | /* Fields which are used when guest is running. */ | ||
111 | struct Xgt_desc_struct guest_idt_desc; | ||
112 | struct Xgt_desc_struct guest_gdt_desc; | ||
113 | struct i386_hw_tss guest_tss; | ||
114 | struct desc_struct guest_idt[IDT_ENTRIES]; | ||
115 | struct desc_struct guest_gdt[GDT_ENTRIES]; | ||
116 | }; | ||
117 | |||
118 | /* We have two pages shared with guests, per cpu. */ | 72 | /* We have two pages shared with guests, per cpu. */ |
119 | struct lguest_pages | 73 | struct lguest_pages |
120 | { | 74 | { |
@@ -180,11 +134,7 @@ struct lguest
180 | /* Dead? */ | 134 | /* Dead? */ |
181 | const char *dead; | 135 | const char *dead; |
182 | 136 | ||
183 | /* The GDT entries copied into lguest_ro_state when running. */ | 137 | struct lguest_arch arch; |
184 | struct desc_struct gdt[GDT_ENTRIES]; | ||
185 | |||
186 | /* The IDT entries: some copied into lguest_ro_state when running. */ | ||
187 | struct desc_struct idt[IDT_ENTRIES]; | ||
188 | 138 | ||
189 | /* Virtual clock device */ | 139 | /* Virtual clock device */ |
190 | struct hrtimer hrt; | 140 | struct hrtimer hrt; |
@@ -239,6 +189,15 @@ void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages);
239 | int demand_page(struct lguest *info, unsigned long cr2, int errcode); | 189 | int demand_page(struct lguest *info, unsigned long cr2, int errcode); |
240 | void pin_page(struct lguest *lg, unsigned long vaddr); | 190 | void pin_page(struct lguest *lg, unsigned long vaddr); |
241 | 191 | ||
192 | /* <arch>/core.c: */ | ||
193 | void lguest_arch_host_init(void); | ||
194 | void lguest_arch_host_fini(void); | ||
195 | void lguest_arch_run_guest(struct lguest *lg); | ||
196 | void lguest_arch_handle_trap(struct lguest *lg); | ||
197 | |||
198 | /* <arch>/switcher.S: */ | ||
199 | extern char start_switcher_text[], end_switcher_text[], switch_to_guest[]; | ||
200 | |||
242 | /* lguest_user.c: */ | 201 | /* lguest_user.c: */ |
243 | int lguest_device_init(void); | 202 | int lguest_device_init(void); |
244 | void lguest_device_remove(void); | 203 | void lguest_device_remove(void); |
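struct lguest_arch itself is defined in the new <asm/lguest.h>, which is not part of this diff. Judging only from the fields the rest of the patch touches (arch.gdt, arch.idt and arch.last_pagefault), it presumably looks roughly like this (an inferred sketch, not the real header):

struct lguest_arch
{
	/* The GDT entries copied into lguest_ro_state when running. */
	struct desc_struct gdt[GDT_ENTRIES];

	/* The IDT entries: some copied into lguest_ro_state when running. */
	struct desc_struct idt[IDT_ENTRIES];

	/* The address of the last Guest page fault, as read from cr2. */
	unsigned long last_pagefault;
};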
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 9b81119f46e9..95eb9cf297bf 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -73,14 +73,14 @@ static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
73 | /* Segment descriptors contain a privilege level: the Guest is | 73 | /* Segment descriptors contain a privilege level: the Guest is |
74 | * sometimes careless and leaves this as 0, even though it's | 74 | * sometimes careless and leaves this as 0, even though it's |
75 | * running at privilege level 1. If so, we fix it here. */ | 75 | * running at privilege level 1. If so, we fix it here. */ |
76 | if ((lg->gdt[i].b & 0x00006000) == 0) | 76 | if ((lg->arch.gdt[i].b & 0x00006000) == 0) |
77 | lg->gdt[i].b |= (GUEST_PL << 13); | 77 | lg->arch.gdt[i].b |= (GUEST_PL << 13); |
78 | 78 | ||
79 | /* Each descriptor has an "accessed" bit. If we don't set it | 79 | /* Each descriptor has an "accessed" bit. If we don't set it |
80 | * now, the CPU will try to set it when the Guest first loads | 80 | * now, the CPU will try to set it when the Guest first loads |
81 | * that entry into a segment register. But the GDT isn't | 81 | * that entry into a segment register. But the GDT isn't |
82 | * writable by the Guest, so bad things can happen. */ | 82 | * writable by the Guest, so bad things can happen. */ |
83 | lg->gdt[i].b |= 0x00000100; | 83 | lg->arch.gdt[i].b |= 0x00000100; |
84 | } | 84 | } |
85 | } | 85 | } |
86 | 86 | ||
@@ -106,12 +106,12 @@ void setup_default_gdt_entries(struct lguest_ro_state *state)
106 | void setup_guest_gdt(struct lguest *lg) | 106 | void setup_guest_gdt(struct lguest *lg) |
107 | { | 107 | { |
108 | /* Start with full 0-4G segments... */ | 108 | /* Start with full 0-4G segments... */ |
109 | lg->gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; | 109 | lg->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; |
110 | lg->gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; | 110 | lg->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; |
111 | /* ...except the Guest is allowed to use them, so set the privilege | 111 | /* ...except the Guest is allowed to use them, so set the privilege |
112 | * level appropriately in the flags. */ | 112 | * level appropriately in the flags. */ |
113 | lg->gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13); | 113 | lg->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13); |
114 | lg->gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); | 114 | lg->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); |
115 | } | 115 | } |
116 | 116 | ||
117 | /* Like the IDT, we never simply use the GDT the Guest gives us. We set up the | 117 | /* Like the IDT, we never simply use the GDT the Guest gives us. We set up the |
@@ -126,7 +126,7 @@ void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
126 | unsigned int i; | 126 | unsigned int i; |
127 | 127 | ||
128 | for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) | 128 | for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) |
129 | gdt[i] = lg->gdt[i]; | 129 | gdt[i] = lg->arch.gdt[i]; |
130 | } | 130 | } |
131 | 131 | ||
132 | /* This is the full version */ | 132 | /* This is the full version */ |
@@ -138,7 +138,7 @@ void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
138 | * replaced. See ignored_gdt() above. */ | 138 | * replaced. See ignored_gdt() above. */ |
139 | for (i = 0; i < GDT_ENTRIES; i++) | 139 | for (i = 0; i < GDT_ENTRIES; i++) |
140 | if (!ignored_gdt(i)) | 140 | if (!ignored_gdt(i)) |
141 | gdt[i] = lg->gdt[i]; | 141 | gdt[i] = lg->arch.gdt[i]; |
142 | } | 142 | } |
143 | 143 | ||
144 | /* This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). */ | 144 | /* This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). */ |
@@ -146,12 +146,12 @@ void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
146 | { | 146 | { |
147 | /* We assume the Guest has the same number of GDT entries as the | 147 | /* We assume the Guest has the same number of GDT entries as the |
148 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ | 148 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ |
149 | if (num > ARRAY_SIZE(lg->gdt)) | 149 | if (num > ARRAY_SIZE(lg->arch.gdt)) |
150 | kill_guest(lg, "too many gdt entries %i", num); | 150 | kill_guest(lg, "too many gdt entries %i", num); |
151 | 151 | ||
152 | /* We read the whole thing in, then fix it up. */ | 152 | /* We read the whole thing in, then fix it up. */ |
153 | lgread(lg, lg->gdt, table, num * sizeof(lg->gdt[0])); | 153 | lgread(lg, lg->arch.gdt, table, num * sizeof(lg->arch.gdt[0])); |
154 | fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->gdt)); | 154 | fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->arch.gdt)); |
155 | /* Mark that the GDT changed so the core knows it has to copy it again, | 155 | /* Mark that the GDT changed so the core knows it has to copy it again, |
156 | * even if the Guest is run on the same CPU. */ | 156 | * even if the Guest is run on the same CPU. */ |
157 | lg->changed |= CHANGED_GDT; | 157 | lg->changed |= CHANGED_GDT; |
@@ -159,7 +159,7 @@ void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
159 | 159 | ||
160 | void guest_load_tls(struct lguest *lg, unsigned long gtls) | 160 | void guest_load_tls(struct lguest *lg, unsigned long gtls) |
161 | { | 161 | { |
162 | struct desc_struct *tls = &lg->gdt[GDT_ENTRY_TLS_MIN]; | 162 | struct desc_struct *tls = &lg->arch.gdt[GDT_ENTRY_TLS_MIN]; |
163 | 163 | ||
164 | lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES); | 164 | lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES); |
165 | fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1); | 165 | fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1); |
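For readers decoding the magic numbers in fixup_gdt_table(): the descriptor privilege level (DPL) sits in bits 13-14 of the descriptor's high word and bit 8 is the "accessed" flag, which is why the code masks with 0x00006000, ORs in (GUEST_PL << 13) and sets 0x00000100. The same bit arithmetic as a standalone illustration (hypothetical helper, same constants):

static void demote_descriptor(struct desc_struct *d)
{
	/* If the DPL field (bits 13-14 of the high word) is still 0,
	 * lower it to the Guest's privilege level. */
	if ((d->b & 0x00006000) == 0)
		d->b |= (GUEST_PL << 13);

	/* Pre-set the "accessed" bit (bit 8) so the CPU never needs to
	 * write it into the Guest's read-only GDT itself. */
	d->b |= 0x00000100;
}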
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
new file mode 100644
index 000000000000..e2f46b16ce31
--- /dev/null
+++ b/drivers/lguest/x86/core.c
@@ -0,0 +1,476 @@
1 | /* | ||
2 | * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation. | ||
3 | * Copyright (C) 2007, Jes Sorensen <jes@sgi.com> SGI. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
14 | * details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/start_kernel.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/console.h> | ||
24 | #include <linux/screen_info.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/clocksource.h> | ||
28 | #include <linux/clockchips.h> | ||
29 | #include <linux/cpu.h> | ||
30 | #include <linux/lguest.h> | ||
31 | #include <linux/lguest_launcher.h> | ||
32 | #include <linux/lguest_bus.h> | ||
33 | #include <asm/paravirt.h> | ||
34 | #include <asm/param.h> | ||
35 | #include <asm/page.h> | ||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/desc.h> | ||
38 | #include <asm/setup.h> | ||
39 | #include <asm/lguest.h> | ||
40 | #include <asm/uaccess.h> | ||
41 | #include <asm/i387.h> | ||
42 | #include "../lg.h" | ||
43 | |||
44 | static int cpu_had_pge; | ||
45 | |||
46 | static struct { | ||
47 | unsigned long offset; | ||
48 | unsigned short segment; | ||
49 | } lguest_entry; | ||
50 | |||
51 | /* Offset from where switcher.S was compiled to where we've copied it */ | ||
52 | static unsigned long switcher_offset(void) | ||
53 | { | ||
54 | return SWITCHER_ADDR - (unsigned long)start_switcher_text; | ||
55 | } | ||
56 | |||
57 | /* This cpu's struct lguest_pages. */ | ||
58 | static struct lguest_pages *lguest_pages(unsigned int cpu) | ||
59 | { | ||
60 | return &(((struct lguest_pages *) | ||
61 | (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]); | ||
62 | } | ||
63 | |||
64 | static DEFINE_PER_CPU(struct lguest *, last_guest); | ||
65 | |||
66 | /*S:010 | ||
67 | * We are getting close to the Switcher. | ||
68 | * | ||
69 | * Remember that each CPU has two pages which are visible to the Guest when it | ||
70 | * runs on that CPU. This has to contain the state for that Guest: we copy the | ||
71 | * state in just before we run the Guest. | ||
72 | * | ||
73 | * Each Guest has "changed" flags which indicate what has changed in the Guest | ||
74 | * since it last ran. We saw this set in interrupts_and_traps.c and | ||
75 | * segments.c. | ||
76 | */ | ||
77 | static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages) | ||
78 | { | ||
79 | /* Copying all this data can be quite expensive. We usually run the | ||
80 | * same Guest we ran last time (and that Guest hasn't run anywhere else | ||
81 | * meanwhile). If that's not the case, we pretend everything in the | ||
82 | * Guest has changed. */ | ||
83 | if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) { | ||
84 | __get_cpu_var(last_guest) = lg; | ||
85 | lg->last_pages = pages; | ||
86 | lg->changed = CHANGED_ALL; | ||
87 | } | ||
88 | |||
89 | /* These copies are pretty cheap, so we do them unconditionally: */ | ||
90 | /* Save the current Host top-level page directory. */ | ||
91 | pages->state.host_cr3 = __pa(current->mm->pgd); | ||
92 | /* Set up the Guest's page tables to see this CPU's pages (and no | ||
93 | * other CPU's pages). */ | ||
94 | map_switcher_in_guest(lg, pages); | ||
95 | /* Set up the two "TSS" members which tell the CPU what stack to use | ||
96 | * for traps which go directly into the Guest (ie. traps at privilege | ||
97 | * level 1). */ | ||
98 | pages->state.guest_tss.esp1 = lg->esp1; | ||
99 | pages->state.guest_tss.ss1 = lg->ss1; | ||
100 | |||
101 | /* Copy direct-to-Guest trap entries. */ | ||
102 | if (lg->changed & CHANGED_IDT) | ||
103 | copy_traps(lg, pages->state.guest_idt, default_idt_entries); | ||
104 | |||
105 | /* Copy all GDT entries which the Guest can change. */ | ||
106 | if (lg->changed & CHANGED_GDT) | ||
107 | copy_gdt(lg, pages->state.guest_gdt); | ||
108 | /* If only the TLS entries have changed, copy them. */ | ||
109 | else if (lg->changed & CHANGED_GDT_TLS) | ||
110 | copy_gdt_tls(lg, pages->state.guest_gdt); | ||
111 | |||
112 | /* Mark the Guest as unchanged for next time. */ | ||
113 | lg->changed = 0; | ||
114 | } | ||
115 | |||
116 | /* Finally: the code to actually call into the Switcher to run the Guest. */ | ||
117 | static void run_guest_once(struct lguest *lg, struct lguest_pages *pages) | ||
118 | { | ||
119 | /* This is a dummy value we need for GCC's sake. */ | ||
120 | unsigned int clobber; | ||
121 | |||
122 | /* Copy the guest-specific information into this CPU's "struct | ||
123 | * lguest_pages". */ | ||
124 | copy_in_guest_info(lg, pages); | ||
125 | |||
126 | /* Set the trap number to 256 (impossible value). If we fault while | ||
127 | * switching to the Guest (bad segment registers or bug), this will | ||
128 | * cause us to abort the Guest. */ | ||
129 | lg->regs->trapnum = 256; | ||
130 | |||
131 | /* Now: we push the "eflags" register on the stack, then do an "lcall". | ||
132 | * This is how we change from using the kernel code segment to using | ||
133 | * the dedicated lguest code segment, as well as jumping into the | ||
134 | * Switcher. | ||
135 | * | ||
136 | * The lcall also pushes the old code segment (KERNEL_CS) onto the | ||
137 | * stack, then the address of this call. This stack layout happens to | ||
138 | * exactly match the stack of an interrupt... */ | ||
139 | asm volatile("pushf; lcall *lguest_entry" | ||
140 | /* This is how we tell GCC that %eax ("a") and %ebx ("b") | ||
141 | * are changed by this routine. The "=" means output. */ | ||
142 | : "=a"(clobber), "=b"(clobber) | ||
143 | /* %eax contains the pages pointer. ("0" refers to the | ||
144 | * 0-th argument above, ie "a"). %ebx contains the | ||
145 | * physical address of the Guest's top-level page | ||
146 | * directory. */ | ||
147 | : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir)) | ||
148 | /* We tell gcc that all these registers could change, | ||
149 | * which means we don't have to save and restore them in | ||
150 | * the Switcher. */ | ||
151 | : "memory", "%edx", "%ecx", "%edi", "%esi"); | ||
152 | } | ||
153 | /*:*/ | ||
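The "lcall *lguest_entry" above dereferences the two-field structure declared at the top of this file: laid out in memory it is exactly the far pointer (32-bit offset followed by 16-bit segment selector) that lcall's indirect form expects. The code that fills it in was deleted from core.c earlier in this patch and presumably reappears in lguest_arch_host_init() beyond the point where this listing is cut off; under that assumption it amounts to:

	/* Point the far pointer at the high-mapped copy of switch_to_guest,
	 * reached through the dedicated LGUEST_CS code segment. */
	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
	lguest_entry.segment = LGUEST_CS;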
154 | |||
155 | /*H:040 This is the i386-specific code to setup and run the Guest. Interrupts | ||
156 | * are disabled: we own the CPU. */ | ||
157 | void lguest_arch_run_guest(struct lguest *lg) | ||
158 | { | ||
159 | /* Remember the awfully-named TS bit? If the Guest has asked | ||
160 | * to set it we set it now, so we can trap and pass that trap | ||
161 | * to the Guest if it uses the FPU. */ | ||
162 | if (lg->ts) | ||
163 | lguest_set_ts(); | ||
164 | |||
165 | /* SYSENTER is an optimized way of doing system calls. We | ||
166 | * can't allow it because it always jumps to privilege level 0. | ||
167 | * A normal Guest won't try it because we don't advertise it in | ||
168 | * CPUID, but a malicious Guest (or malicious Guest userspace | ||
169 | * program) could, so we tell the CPU to disable it before | ||
170 | * running the Guest. */ | ||
171 | if (boot_cpu_has(X86_FEATURE_SEP)) | ||
172 | wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); | ||
173 | |||
174 | /* Now we actually run the Guest. It will pop back out when | ||
175 | * something interesting happens, and we can examine its | ||
176 | * registers to see what it was doing. */ | ||
177 | run_guest_once(lg, lguest_pages(raw_smp_processor_id())); | ||
178 | |||
179 | /* The "regs" pointer contains two extra entries which are not | ||
180 | * really registers: a trap number which says what interrupt or | ||
181 | * trap made the switcher code come back, and an error code | ||
182 | * which some traps set. */ | ||
183 | |||
184 | /* If the Guest page faulted, then the cr2 register will tell | ||
185 | * us the bad virtual address. We have to grab this now, | ||
186 | * because once we re-enable interrupts an interrupt could | ||
187 | * fault and thus overwrite cr2, or we could even move off to a | ||
188 | * different CPU. */ | ||
189 | if (lg->regs->trapnum == 14) | ||
190 | lg->arch.last_pagefault = read_cr2(); | ||
191 | /* Similarly, if we took a trap because the Guest used the FPU, | ||
192 | * we have to restore the FPU it expects to see. */ | ||
193 | else if (lg->regs->trapnum == 7) | ||
194 | math_state_restore(); | ||
195 | |||
196 | /* Restore SYSENTER if it's supposed to be on. */ | ||
197 | if (boot_cpu_has(X86_FEATURE_SEP)) | ||
198 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); | ||
199 | } | ||
200 | |||
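lguest_set_ts() is not defined in this file (it presumably moves into the new <asm/lguest.h>). Going by the set_ts() helper this patch deletes from core.c, it simply sets the TS ("task switched") bit, bit 3 of CR0, so the Guest's next FPU instruction traps; a sketch under that assumption:

static inline void lguest_set_ts(void)
{
	u32 cr0 = read_cr0();

	if (!(cr0 & 8))		/* bit 3 is TS */
		write_cr0(cr0 | 8);
}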
201 | /*H:130 Our Guest is usually so well behaved; it never tries to do things it | ||
202 | * isn't allowed to. Unfortunately, Linux's paravirtual infrastructure isn't | ||
203 | * quite complete, because it doesn't contain replacements for the Intel I/O | ||
204 | * instructions. As a result, the Guest sometimes fumbles across one during | ||
205 | * the boot process as it probes for various things which are usually attached | ||
206 | * to a PC. | ||
207 | * | ||
208 | * When the Guest uses one of these instructions, we get trap #13 (General | ||
209 | * Protection Fault) and come here. We see if it's one of those troublesome | ||
210 | * instructions and skip over it. We return true if we did. */ | ||
211 | static int emulate_insn(struct lguest *lg) | ||
212 | { | ||
213 | u8 insn; | ||
214 | unsigned int insnlen = 0, in = 0, shift = 0; | ||
215 | /* The eip contains the *virtual* address of the Guest's instruction: | ||
216 | * guest_pa just subtracts the Guest's page_offset. */ | ||
217 | unsigned long physaddr = guest_pa(lg, lg->regs->eip); | ||
218 | |||
219 | /* The guest_pa() function only works for Guest kernel addresses, but | ||
220 | * that's all we're trying to do anyway. */ | ||
221 | if (lg->regs->eip < lg->page_offset) | ||
222 | return 0; | ||
223 | |||
224 | /* Decoding x86 instructions is icky. */ | ||
225 | lgread(lg, &insn, physaddr, 1); | ||
226 | |||
227 | /* 0x66 is an "operand prefix". It means it's using the upper 16 bits | ||
228 | of the eax register. */ | ||
229 | if (insn == 0x66) { | ||
230 | shift = 16; | ||
231 | /* The instruction is 1 byte so far, read the next byte. */ | ||
232 | insnlen = 1; | ||
233 | lgread(lg, &insn, physaddr + insnlen, 1); | ||
234 | } | ||
235 | |||
236 | /* We can ignore the lower bit for the moment and decode the 4 opcodes | ||
237 | * we need to emulate. */ | ||
238 | switch (insn & 0xFE) { | ||
239 | case 0xE4: /* in <next byte>,%al */ | ||
240 | insnlen += 2; | ||
241 | in = 1; | ||
242 | break; | ||
243 | case 0xEC: /* in (%dx),%al */ | ||
244 | insnlen += 1; | ||
245 | in = 1; | ||
246 | break; | ||
247 | case 0xE6: /* out %al,<next byte> */ | ||
248 | insnlen += 2; | ||
249 | break; | ||
250 | case 0xEE: /* out %al,(%dx) */ | ||
251 | insnlen += 1; | ||
252 | break; | ||
253 | default: | ||
254 | /* OK, we don't know what this is, can't emulate. */ | ||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | /* If it was an "IN" instruction, they expect the result to be read | ||
259 | * into %eax, so we change %eax. We always return all-ones, which | ||
260 | * traditionally means "there's nothing there". */ | ||
261 | if (in) { | ||
262 | /* Lower bit tells us whether it's a 16 or 32 bit access */ | ||
263 | if (insn & 0x1) | ||
264 | lg->regs->eax = 0xFFFFFFFF; | ||
265 | else | ||
266 | lg->regs->eax |= (0xFFFF << shift); | ||
267 | } | ||
268 | /* Finally, we've "done" the instruction, so move past it. */ | ||
269 | lg->regs->eip += insnlen; | ||
270 | /* Success! */ | ||
271 | return 1; | ||
272 | } | ||
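To see why returning all-ones is enough, it helps to look at the Guest side. A boot-time port probe like the hypothetical helper below compiles to "in (%dx),%al" (opcode 0xEC) or, with an 8-bit immediate port, "in $imm8,%al" (0xE4); the emulation above skips the instruction and hands back 0xFF in %al, which the probing code reads as "nothing there".

/* Hypothetical Guest-side helper: probing a legacy I/O port. */
static inline u8 probe_port(u16 port)
{
	u8 val;

	/* The usual inb() idiom; under lguest it traps with a GPF and is
	 * emulated by the Host as above. */
	asm volatile("inb %w1, %b0" : "=a"(val) : "Nd"(port));
	return val;
}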
273 | |||
274 | /*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */ | ||
275 | void lguest_arch_handle_trap(struct lguest *lg) | ||
276 | { | ||
277 | switch (lg->regs->trapnum) { | ||
278 | case 13: /* We've intercepted a GPF. */ | ||
279 | /* Check if this was one of those annoying IN or OUT | ||
280 | * instructions which we need to emulate. If so, we | ||
281 | * just go back into the Guest after we've done it. */ | ||
282 | if (lg->regs->errcode == 0) { | ||
283 | if (emulate_insn(lg)) | ||
284 | return; | ||
285 | } | ||
286 | break; | ||
287 | case 14: /* We've intercepted a page fault. */ | ||
288 | /* The Guest accessed a virtual address that wasn't | ||
289 | * mapped. This happens a lot: we don't actually set | ||
290 | * up most of the page tables for the Guest at all when | ||
291 | * we start: as it runs it asks for more and more, and | ||
292 | * we set them up as required. In this case, we don't | ||
293 | * even tell the Guest that the fault happened. | ||
294 | * | ||
295 | * The errcode tells whether this was a read or a | ||
296 | * write, and whether kernel or userspace code. */ | ||
297 | if (demand_page(lg, lg->arch.last_pagefault, lg->regs->errcode)) | ||
298 | return; | ||
299 | |||
300 | /* OK, it's really not there (or not OK): the Guest | ||
301 | * needs to know. We write out the cr2 value so it | ||
302 | * knows where the fault occurred. | ||
303 | * | ||
304 | * Note that if the Guest were really messed up, this | ||
305 | * could happen before it's done the INITIALIZE | ||
306 | * hypercall, so lg->lguest_data will be NULL */ | ||
307 | if (lg->lguest_data && | ||
308 | put_user(lg->arch.last_pagefault, &lg->lguest_data->cr2)) | ||
309 | kill_guest(lg, "Writing cr2"); | ||
310 | break; | ||
311 | case 7: /* We've intercepted a Device Not Available fault. */ | ||
312 | /* If the Guest doesn't want to know, we already | ||
313 | * restored the Floating Point Unit, so we just | ||
314 | * continue without telling it. */ | ||
315 | if (!lg->ts) | ||
316 | return; | ||
317 | break; | ||
318 | case 32 ... 255: | ||
319 | /* These values mean a real interrupt occurred, in | ||
320 | * which case the Host handler has already been run. | ||
321 | * We just do a friendly check if another process | ||
322 | * should now be run, then fall through to loop | ||
323 | * around: */ | ||
324 | cond_resched(); | ||
325 | case LGUEST_TRAP_ENTRY: /* Handled before re-entering Guest */ | ||
326 | return; | ||
327 | } | ||
328 | |||
329 | /* We didn't handle the trap, so it needs to go to the Guest. */ | ||
330 | if (!deliver_trap(lg, lg->regs->trapnum)) | ||
331 | /* If the Guest doesn't have a handler (either it hasn't | ||
332 | * registered any yet, or it's one of the faults we don't let | ||
333 | * it handle), it dies with a cryptic error message. */ | ||
334 | kill_guest(lg, "unhandled trap %li at %#lx (%#lx)", | ||
335 | lg->regs->trapnum, lg->regs->eip, | ||
336 | lg->regs->trapnum == 14 ? lg->arch.last_pagefault | ||
337 | : lg->regs->errcode); | ||
338 | } | ||
339 | |||
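For reference, the error code handed to demand_page() above uses the standard x86 page-fault layout: bit 0 is set when the page was present (so the fault is a protection problem, not a missing mapping), bit 1 when the access was a write, and bit 2 when it came from userspace. A small stand-alone sketch of that layout follows; it is illustrative only (the macro names and describe_fault() are invented for the example) and is not code from this patch.

/* pf_errcode.c: decoding the x86 page fault error code bits. */
#include <stdio.h>

#define PF_PRESENT      0x1     /* page was present: protection fault */
#define PF_WRITE        0x2     /* the faulting access was a write */
#define PF_USER         0x4     /* the fault happened in user mode */

static void describe_fault(unsigned long errcode)
{
        printf("%s %s access to a %s page\n",
               (errcode & PF_USER) ? "user" : "kernel",
               (errcode & PF_WRITE) ? "write" : "read",
               (errcode & PF_PRESENT) ? "present" : "missing");
}

int main(void)
{
        describe_fault(0x2);    /* kernel write to a missing page */
        describe_fault(0x7);    /* user write to a present page */
        return 0;
}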
340 | /* Now we can look at each of the routines this calls, in increasing order of | ||
341 | * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), | ||
342 | * deliver_trap() and demand_page(). After all those, we'll be ready to | ||
343 | * examine the Switcher, and our philosophical understanding of the Host/Guest | ||
344 | * duality will be complete. :*/ | ||
345 | static void adjust_pge(void *on) | ||
346 | { | ||
347 | if (on) | ||
348 | write_cr4(read_cr4() | X86_CR4_PGE); | ||
349 | else | ||
350 | write_cr4(read_cr4() & ~X86_CR4_PGE); | ||
351 | } | ||
352 | |||
353 | /*H:020 Now the Switcher is mapped and everything else is ready, we need to do | ||
354 | * some more i386-specific initialization. */ | ||
355 | void __init lguest_arch_host_init(void) | ||
356 | { | ||
357 | int i; | ||
358 | |||
359 | /* Most of x86/switcher_32.S doesn't care that it's been moved; on | ||
360 | * Intel, jumps are relative, and it doesn't access any references to | ||
361 | * external code or data. | ||
362 | * | ||
363 | * The only exception is the interrupt handlers in switcher.S: their | ||
364 | * addresses are placed in a table (default_idt_entries), so we need to | ||
365 | * update the table with the new addresses. switcher_offset() is a | ||
366 | * convenience function which returns the distance between the builtin | ||
367 | * switcher code and the high-mapped copy we just made. */ | ||
368 | for (i = 0; i < IDT_ENTRIES; i++) | ||
369 | default_idt_entries[i] += switcher_offset(); | ||
370 | |||
371 | /* | ||
372 | * Set up the Switcher's per-cpu areas. | ||
373 | * | ||
374 | * Each CPU gets two pages of its own within the high-mapped region | ||
375 | * (aka. "struct lguest_pages"). Much of this can be initialized now, | ||
376 | * but some depends on what Guest we are running (which is set up in | ||
377 | * copy_in_guest_info()). | ||
378 | */ | ||
379 | for_each_possible_cpu(i) { | ||
380 | /* lguest_pages() returns this CPU's two pages. */ | ||
381 | struct lguest_pages *pages = lguest_pages(i); | ||
382 | /* This is a convenience pointer to make the code fit one | ||
383 | * statement to a line. */ | ||
384 | struct lguest_ro_state *state = &pages->state; | ||
385 | |||
386 | /* The Global Descriptor Table: the Host has a different one | ||
387 | * for each CPU. We keep a descriptor for the GDT which says | ||
388 | * where it is and how big it is (the size field actually holds the | ||
389 | * offset of the last byte, i.e. the size minus one, hence the "-1"). */ | ||
390 | state->host_gdt_desc.size = GDT_SIZE-1; | ||
391 | state->host_gdt_desc.address = (long)get_cpu_gdt_table(i); | ||
392 | |||
393 | /* All CPUs on the Host use the same Interrupt Descriptor | ||
394 | * Table, so we just use store_idt(), which gets this CPU's IDT | ||
395 | * descriptor. */ | ||
396 | store_idt(&state->host_idt_desc); | ||
397 | |||
398 | /* The descriptors for the Guest's GDT and IDT can be filled | ||
399 | * out now, too. We copy the GDT & IDT into ->guest_gdt and | ||
400 | * ->guest_idt before actually running the Guest. */ | ||
401 | state->guest_idt_desc.size = sizeof(state->guest_idt)-1; | ||
402 | state->guest_idt_desc.address = (long)&state->guest_idt; | ||
403 | state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1; | ||
404 | state->guest_gdt_desc.address = (long)&state->guest_gdt; | ||
405 | |||
406 | /* We know where we want the stack to be when the Guest enters | ||
407 | * the switcher: in pages->regs. The stack grows downwards (pushes use | ||
408 | * decreasing addresses), so we start it at the end of that structure. */ | ||
409 | state->guest_tss.esp0 = (long)(&pages->regs + 1); | ||
410 | /* And this is the GDT entry to use for the stack: we keep a | ||
411 | * couple of special LGUEST entries. */ | ||
412 | state->guest_tss.ss0 = LGUEST_DS; | ||
413 | |||
414 | /* x86 can have a fine-grained bitmap which indicates what I/O | ||
415 | * ports the process can use. We set it to the end of our | ||
416 | * structure, meaning "none". */ | ||
417 | state->guest_tss.io_bitmap_base = sizeof(state->guest_tss); | ||
418 | |||
419 | /* Some GDT entries are the same across all Guests, so we can | ||
420 | * set them up now. */ | ||
421 | setup_default_gdt_entries(state); | ||
422 | /* Most IDT entries are the same for all Guests, too. */ | ||
423 | setup_default_idt_entries(state, default_idt_entries); | ||
424 | |||
425 | /* The Host needs to be able to use the LGUEST segments on this | ||
426 | * CPU, too, so put them in the Host GDT. */ | ||
427 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; | ||
428 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; | ||
429 | } | ||
430 | |||
431 | /* In the Switcher, we want the %cs segment register to use the | ||
432 | * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so | ||
433 | * it will be undisturbed when we switch. To change %cs and jump we | ||
434 | * need this structure to feed to Intel's "lcall" instruction. */ | ||
435 | lguest_entry.offset = (long)switch_to_guest + switcher_offset(); | ||
436 | lguest_entry.segment = LGUEST_CS; | ||
437 | |||
438 | /* Finally, we need to turn off "Page Global Enable". PGE is an | ||
439 | * optimization where page table entries are specially marked to show | ||
440 | * they never change. The Host kernel marks all the kernel pages this | ||
441 | * way because it's always present, even when userspace is running. | ||
442 | * | ||
443 | * Lguest breaks this: unbeknownst to the rest of the Host kernel, we | ||
444 | * switch to the Guest kernel. If you don't disable this on all CPUs, | ||
445 | * you'll get really weird bugs that you'll chase for two days. | ||
446 | * | ||
447 | * I used to turn PGE off every time we switched to the Guest and back | ||
448 | * on when we return, but that slowed the Switcher down noticeably. */ | ||
449 | |||
450 | /* We don't need the complexity of CPUs coming and going while we're | ||
451 | * doing this. */ | ||
452 | lock_cpu_hotplug(); | ||
453 | if (cpu_has_pge) { /* We have a broader idea of "global". */ | ||
454 | /* Remember that this was originally set (for cleanup). */ | ||
455 | cpu_had_pge = 1; | ||
456 | /* adjust_pge is a helper function which sets or unsets the PGE | ||
457 | * bit on its CPU, depending on the argument (0 == unset). */ | ||
458 | on_each_cpu(adjust_pge, (void *)0, 0, 1); | ||
459 | /* Turn off the feature in the global feature set. */ | ||
460 | clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); | ||
461 | } | ||
462 | unlock_cpu_hotplug(); | ||
463 | } | ||
464 | /*:*/ | ||
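The lguest_entry structure filled in just above is exactly the memory operand a far "lcall" expects: a 32-bit offset followed by a 16-bit segment selector. Here is a stand-alone sketch of that layout, for illustration only; struct far_pointer is invented for the example, and the fixed-width types stand in for the "unsigned long"/"unsigned short" pair, which are the same sizes on i386.

/* far_pointer.c: the 6-byte memory operand a far "lcall" reads. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct far_pointer {
        uint32_t offset;        /* where to jump; sits at the lower address */
        uint16_t segment;       /* selector loaded into %cs, e.g. LGUEST_CS */
} __attribute__((packed));

int main(void)
{
        printf("offset at byte %zu, segment at byte %zu, %zu bytes total\n",
               offsetof(struct far_pointer, offset),
               offsetof(struct far_pointer, segment),
               sizeof(struct far_pointer));
        return 0;
}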
465 | |||
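And one more illustration, this time of the guest_tss.esp0 line above: pushes use decreasing addresses, so pointing esp0 one past the end of pages->regs makes the words the CPU pushes land inside that structure, top slot first. This is a stand-alone sketch only; struct fake_regs and the pushed values are invented for the example.

/* tss_stack.c: why the stack pointer starts one past the end. */
#include <stdio.h>
#include <stdint.h>

struct fake_regs {
        uint32_t slots[8];      /* pretend register-save area */
};

int main(void)
{
        struct fake_regs regs = { { 0 } };
        /* Same idea as guest_tss.esp0 = (long)(&pages->regs + 1); */
        uint32_t *sp = (uint32_t *)(&regs + 1);

        /* Simulate three hardware pushes: decrement, then store. */
        *--sp = 0x23;           /* e.g. the old stack segment */
        *--sp = 0xbffff000;     /* e.g. the old stack pointer */
        *--sp = 0x246;          /* e.g. the old flags */

        printf("last pushed word sits in slots[%td]\n", sp - regs.slots);
        return 0;
}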
466 | void __exit lguest_arch_host_fini(void) | ||
467 | { | ||
468 | /* If we had PGE before we started, turn it back on now. */ | ||
469 | lock_cpu_hotplug(); | ||
470 | if (cpu_had_pge) { | ||
471 | set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); | ||
472 | /* adjust_pge's argument "1" means set PGE. */ | ||
473 | on_each_cpu(adjust_pge, (void *)1, 0, 1); | ||
474 | } | ||
475 | unlock_cpu_hotplug(); | ||
476 | } | ||
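Finally, a sketch of the PGE bookkeeping shared by lguest_arch_host_init() and lguest_arch_host_fini(): note whether the bit was on, clear it while Guests can run, and put it back exactly as found on teardown. Everything below is simulated user-space code for illustration (sim_cr4, sim_adjust_pge() and the starting value are invented); only the bit position of X86_CR4_PGE (bit 7) comes from the real definition.

/* pge_pattern.c: the save, disable, restore pattern used for CR4.PGE. */
#include <stdio.h>

#define SIM_CR4_PGE     0x80UL          /* CR4.PGE lives in bit 7 */

static unsigned long sim_cr4 = 0xb1;    /* a pretend %cr4 with PGE set */
static int had_pge;

/* Same shape as adjust_pge() above: the flag arrives as a void pointer
 * because that is what on_each_cpu() hands to its callback. */
static void sim_adjust_pge(void *on)
{
        if (on)
                sim_cr4 |= SIM_CR4_PGE;
        else
                sim_cr4 &= ~SIM_CR4_PGE;
}

int main(void)
{
        /* Init: remember whether PGE was on, then turn it off. */
        if (sim_cr4 & SIM_CR4_PGE) {
                had_pge = 1;
                sim_adjust_pge((void *)0);
        }
        printf("while Guests run: cr4 = %#lx\n", sim_cr4);

        /* Fini: put things back exactly as we found them. */
        if (had_pge)
                sim_adjust_pge((void *)1);
        printf("after cleanup:    cr4 = %#lx\n", sim_cr4);
        return 0;
}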
diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S index a3d23f79cba4..e66cec5ac240 100644 --- a/drivers/lguest/x86/switcher_32.S +++ b/drivers/lguest/x86/switcher_32.S | |||
@@ -48,7 +48,8 @@ | |||
48 | #include <linux/linkage.h> | 48 | #include <linux/linkage.h> |
49 | #include <asm/asm-offsets.h> | 49 | #include <asm/asm-offsets.h> |
50 | #include <asm/page.h> | 50 | #include <asm/page.h> |
51 | #include "../lg.h" | 51 | #include <asm/segment.h> |
52 | #include <asm/lguest.h> | ||
52 | 53 | ||
53 | // We mark the start of the code to copy | 54 | // We mark the start of the code to copy |
54 | // It's placed in .text tho it's never run here | 55 | // It's placed in .text tho it's never run here |