author    Rusty Russell <rusty@rustcorp.com.au>  2007-10-21 21:01:54 -0400
committer Rusty Russell <rusty@rustcorp.com.au>  2007-10-23 01:49:50 -0400
commit    34b8867a034364ca33d0adb3a1c5b9982903c719
tree      7b6385b3985e7bdcca91103d01dea9f707e8b567
parent    c37ae93d597fc63bae979db76b527dcc7740dc9d
Move lguest guest support to arch/x86.
Lguest has two sides: host support (to launch guests) and guest support
(replacement boot path and paravirt_ops).  This moves the guest side to
arch/x86/lguest where it's closer to related code.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/Kconfig           |   10
-rw-r--r--  arch/i386/Makefile          |    3
-rw-r--r--  arch/x86/lguest/Kconfig     |    8
-rw-r--r--  arch/x86/lguest/Makefile    |    1
-rw-r--r--  arch/x86/lguest/boot.c      | 1106
-rw-r--r--  arch/x86/lguest/i386_head.S |   93
6 files changed, 1213 insertions(+), 8 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 3523e82c8412..5bed8be34ba5 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -257,14 +257,8 @@ config VMI
 	  at the moment), by linking the kernel to a GPL-ed ROM module
 	  provided by the hypervisor.
 
-config LGUEST_GUEST
-	bool "Lguest guest support"
-	select PARAVIRT
-	depends on !X86_PAE
-	help
-	  Lguest is a tiny in-kernel hypervisor. Selecting this will
-	  allow your kernel to boot under lguest. This option will increase
-	  your kernel size by about 6k. If in doubt, say N.
+source "arch/x86/lguest/Kconfig"
+
 endif
 
 config ACPI_SRAT
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index b88e47ca3032..b81cb64d48e5 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -99,6 +99,9 @@ core-$(CONFIG_X86_ES7000) := arch/x86/mach-es7000/
 # Xen paravirtualization support
 core-$(CONFIG_XEN)		+= arch/x86/xen/
 
+# lguest paravirtualization support
+core-$(CONFIG_LGUEST_GUEST)	+= arch/x86/lguest/
+
 # default subarch .h files
 mflags-y += -Iinclude/asm-x86/mach-default
 
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
new file mode 100644
index 000000000000..0fabf87db998
--- /dev/null
+++ b/arch/x86/lguest/Kconfig
@@ -0,0 +1,8 @@
config LGUEST_GUEST
	bool "Lguest guest support"
	select PARAVIRT
	depends on !X86_PAE
	help
	  Lguest is a tiny in-kernel hypervisor. Selecting this will
	  allow your kernel to boot under lguest. This option will increase
	  your kernel size by about 6k. If in doubt, say N.
diff --git a/arch/x86/lguest/Makefile b/arch/x86/lguest/Makefile
new file mode 100644
index 000000000000..27f0c9ed7f60
--- /dev/null
+++ b/arch/x86/lguest/Makefile
@@ -0,0 +1 @@
obj-y := i386_head.o boot.o
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
new file mode 100644
index 000000000000..8e9e485a5cfa
--- /dev/null
+++ b/arch/x86/lguest/boot.c
@@ -0,0 +1,1106 @@
/*P:010
 * A hypervisor allows multiple Operating Systems to run on a single machine.
 * To quote David Wheeler: "Any problem in computer science can be solved with
 * another layer of indirection."
 *
 * We keep things simple in two ways. First, we start with a normal Linux
 * kernel and insert a module (lg.ko) which allows us to run other Linux
 * kernels the same way we'd run processes. We call the first kernel the Host,
 * and the others the Guests. The program which sets up and configures Guests
 * (such as the example in Documentation/lguest/lguest.c) is called the
 * Launcher.
 *
 * Secondly, we only run specially modified Guests, not normal kernels. When
 * you set CONFIG_LGUEST to 'y' or 'm', this automatically sets
 * CONFIG_LGUEST_GUEST=y, which compiles this file into the kernel so it knows
 * how to be a Guest. This means that you can use the same kernel you boot
 * normally (ie. as a Host) as a Guest.
 *
 * These Guests know that they cannot do privileged operations, such as
 * disabling interrupts, and that they have to ask the Host to do such things
 * explicitly. This file consists of all the replacements for such low-level
 * native hardware operations: these special Guest versions call the Host.
 *
 * So how does the kernel know it's a Guest? The Guest starts at a special
 * entry point marked with a magic string, which sets up a few things then
 * calls here. We replace the native functions in the various "paravirt"
 * structures with our Guest versions, then boot like normal. :*/

/*
 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <linux/lguest_bus.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/io.h>

/*G:010 Welcome to the Guest!
 *
 * The Guest in our tale is a simple creature: identical to the Host but
 * behaving in simplified but equivalent ways. In particular, the Guest is the
 * same kernel as the Host (or at least, built from the same source code). :*/

/* Declarations for definitions in i386_head.S */
extern char lguest_noirq_start[], lguest_noirq_end[];
extern const char lgstart_cli[], lgend_cli[];
extern const char lgstart_sti[], lgend_sti[];
extern const char lgstart_popf[], lgend_popf[];
extern const char lgstart_pushf[], lgend_pushf[];
extern const char lgstart_iret[], lgend_iret[];
extern void lguest_iret(void);

struct lguest_data lguest_data = {
	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
	.noirq_start = (u32)lguest_noirq_start,
	.noirq_end = (u32)lguest_noirq_end,
	.blocked_interrupts = { 1 }, /* Block timer interrupts */
};
static cycle_t clock_base;

/*G:035 Notice lazy_hcall(), rather than hcall(). This is our first real
 * optimization trick!
 *
 * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
 * them as a batch when lazy_mode is eventually turned off. Because hypercalls
 * are reasonably expensive, batching them up makes sense. For example, a
 * large mmap might update dozens of page table entries: that code calls
 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
 * lguest_leave_lazy_mode().
 *
 * So, when we're in lazy mode, we call async_hcall() to store the call for
 * future processing. When lazy mode is turned off we issue a hypercall to
 * flush the stored calls.
 */
static void lguest_leave_lazy_mode(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
}

static void lazy_hcall(unsigned long call,
		       unsigned long arg1,
		       unsigned long arg2,
		       unsigned long arg3)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3);
	else
		async_hcall(call, arg1, arg2, arg3);
}

/* async_hcall() is pretty simple: I'm quite proud of it really. We have a
 * ring buffer of stored hypercalls which the Host will run through next time
 * we do a normal hypercall. Each entry in the ring has 4 slots for the
 * hypercall arguments, and a "hcall_status" word which is 0 if the call is
 * ready to go, and 255 once the Host has finished with it.
 *
 * If we come around to a slot which hasn't been finished, then the table is
 * full and we just make the hypercall directly. This has the nice side
 * effect of causing the Host to run all the stored calls in the ring buffer
 * which empties it for next time! */
void async_hcall(unsigned long call,
		 unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	/* Note: This code assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/* Disable interrupts if not already disabled: we don't want an
	 * interrupt handler making a hypercall while we're already doing
	 * one! */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Table full, so do normal hcall which will flush table. */
		hcall(call, arg1, arg2, arg3);
	} else {
		lguest_data.hcalls[next_call].eax = call;
		lguest_data.hcalls[next_call].edx = arg1;
		lguest_data.hcalls[next_call].ebx = arg2;
		lguest_data.hcalls[next_call].ecx = arg3;
		/* Arguments must all be written before we mark it to go */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}
/*:*/

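/* [Editorial sketch, not part of the original patch: roughly what a caller
 * of the lazy-mode machinery looks like. The enter/leave helpers are the
 * generic paravirt ones; the function name here is hypothetical. Each
 * set_pte_at() between them is deferred via lazy_hcall()/async_hcall() into
 * the ring buffer above, and the single LHCALL_FLUSH_ASYNC issued by
 * lguest_leave_lazy_mode() flushes the whole batch.] */
static inline void example_lazy_batch(struct mm_struct *mm,
				      unsigned long addr,
				      pte_t *ptep, pte_t pte)
{
	arch_enter_lazy_mmu_mode();		/* paravirt_enter_lazy_mmu() */
	set_pte_at(mm, addr, ptep, pte);	/* queued, not issued */
	set_pte_at(mm, addr + PAGE_SIZE, ptep + 1, pte);
	arch_leave_lazy_mmu_mode();		/* one hypercall flushes all */
}
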
/* Wrappers for the SEND_DMA and BIND_DMA hypercalls. This is mainly because
 * Jeff Garzik complained that __pa() should never appear in drivers, and this
 * helps remove most of them. But also, it wraps some ugliness. */
void lguest_send_dma(unsigned long key, struct lguest_dma *dma)
{
	/* The hcall might not write this if something goes wrong */
	dma->used_len = 0;
	hcall(LHCALL_SEND_DMA, key, __pa(dma), 0);
}

int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas,
		    unsigned int num, u8 irq)
{
	/* This is the only hypercall which actually wants 5 arguments, and we
	 * only support 4. Fortunately the interrupt number is always less
	 * than 256, so we can pack it with the number of dmas in the final
	 * argument. */
	if (!hcall(LHCALL_BIND_DMA, key, __pa(dmas), (num << 8) | irq))
		return -ENOMEM;
	return 0;
}

/* Unbinding is the same hypercall as binding, but with 0 num & irq. */
void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas)
{
	hcall(LHCALL_BIND_DMA, key, __pa(dmas), 0);
}

/* For guests, device memory can be used as normal memory, so we cast away the
 * __iomem to quieten sparse. */
void *lguest_map(unsigned long phys_addr, unsigned long pages)
{
	return (__force void *)ioremap(phys_addr, PAGE_SIZE*pages);
}

void lguest_unmap(void *addr)
{
	iounmap((__force void __iomem *)addr);
}

/*G:033
 * Here are our first native-instruction replacements: four functions for
 * interrupt control.
 *
 * The simplest way of implementing these would be to have "turn interrupts
 * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow:
 * these are by far the most commonly called functions of those we override.
 *
 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
 * which the Guest can update with a single instruction. The Host knows to
 * check there when it wants to deliver an interrupt.
 */

/* save_flags() is expected to return the processor state (ie. "eflags"). The
 * eflags word contains all kinds of stuff, but in practice Linux only cares
 * about the interrupt flag. Our "save_flags()" just returns that. */
static unsigned long save_fl(void)
{
	return lguest_data.irq_enabled;
}

/* "restore_flags" just sets the flags back to the value given. */
static void restore_fl(unsigned long flags)
{
	lguest_data.irq_enabled = flags;
}

/* Interrupts go off... */
static void irq_disable(void)
{
	lguest_data.irq_enabled = 0;
}

/* Interrupts go on... */
static void irq_enable(void)
{
	lguest_data.irq_enabled = X86_EFLAGS_IF;
}
/*:*/
/*M:003 Note that we don't check for outstanding interrupts when we re-enable
 * them (or when we unmask an interrupt). This seems to work for the moment,
 * since interrupts are rare and we'll just get the interrupt on the next timer
 * tick, but when we turn on CONFIG_NO_HZ, we should revisit this. One way
 * would be to put the "irq_enabled" field in a page by itself, and have the
 * Host write-protect it when an interrupt comes in when irqs are disabled.
 * There will then be a page fault as soon as interrupts are re-enabled. :*/

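/* [Editorial sketch, not part of the original patch: with CONFIG_PARAVIRT,
 * the kernel's usual interrupt macros expand to calls through pv_irq_ops,
 * so once lguest_init() installs the functions above, a critical section
 * like this hypothetical one compiles down to plain reads and writes of
 * lguest_data.irq_enabled rather than privileged cli/sti/pushf/popf:] */
static inline void example_irq_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* ~ flags = save_fl(); irq_disable() */
	/* ... touch data which interrupt handlers also use ... */
	local_irq_restore(flags);	/* ~ restore_fl(flags) */
}
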
/*G:034
 * The Interrupt Descriptor Table (IDT).
 *
 * The IDT tells the processor what to do when an interrupt comes in. Each
 * entry in the table is a 64-bit descriptor: this holds the privilege level,
 * address of the handler, and... well, who cares? The Guest just asks the
 * Host to make the change anyway, because the Host controls the real IDT.
 */
static void lguest_write_idt_entry(struct desc_struct *dt,
				   int entrynum, u32 low, u32 high)
{
	/* Keep the local copy up to date. */
	write_dt_entry(dt, entrynum, low, high);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, low, high);
}

/* Changing to a different IDT is very rare: we keep the IDT up-to-date every
 * time it is written, so we can simply loop through all entries and tell the
 * Host about them. */
static void lguest_load_idt(const struct Xgt_desc_struct *desc)
{
	unsigned int i;
	struct desc_struct *idt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
}

/*
 * The Global Descriptor Table.
 *
 * The Intel architecture defines another table, called the Global Descriptor
 * Table (GDT). You tell the CPU where it is (and its size) using the "lgdt"
 * instruction, and then several other instructions refer to entries in the
 * table. There are three entries which the Switcher needs, so the Host simply
 * controls the entire thing and the Guest asks it to make changes using the
 * LOAD_GDT hypercall.
 *
 * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
 * hypercall and use that repeatedly to load a new IDT. I don't think it
 * really matters, but wouldn't it be nice if they were the same?
 */
static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
{
	BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
	hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
}

/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
 * then tell the Host to reload the entire thing. This operation is so rare
 * that this naive implementation is reasonable. */
static void lguest_write_gdt_entry(struct desc_struct *dt,
				   int entrynum, u32 low, u32 high)
{
	write_dt_entry(dt, entrynum, low, high);
	hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
}

/* OK, I lied. There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables). So we have a hypercall specifically for this case. */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/* There's one problem which normal hardware doesn't have: the Host
	 * can't handle us removing entries we're currently using. So we clear
	 * the GS register here: if it's needed it'll be reloaded anyway. */
	loadsegment(gs, 0);
	lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
}

/*G:038 That's enough excitement for now, back to ploughing through each of
 * the different pv_ops structures (we're about 1/3 of the way through).
 *
 * This is the Local Descriptor Table, another weird Intel thingy. Linux only
 * uses this for some strange applications like Wine. We don't do anything
 * here, so they'll get an informative and friendly Segmentation Fault. */
static void lguest_set_ldt(const void *addr, unsigned entries)
{
}

/* This loads a GDT entry into the "Task Register": that entry points to a
 * structure called the Task State Segment. Some comments scattered through
 * the kernel code indicate that it was used for task switching in ages past,
 * along with blood sacrifice and astrology.
 *
 * Now there's nothing interesting in here that we don't get told elsewhere.
 * But the native version uses the "ltr" instruction, which makes the Host
 * complain to the Guest about a Segmentation Fault and it'll oops. So we
 * override the native version with a do-nothing version. */
static void lguest_load_tr_desc(void)
{
}

/* The "cpuid" instruction is a way of querying both the CPU identity
 * (manufacturer, model, etc) and its features. It was introduced before the
 * Pentium in 1993 and keeps getting extended by both Intel and AMD. As you
 * might imagine, after a decade and a half of this treatment, it is now a
 * giant ball of hair. Its entry in the current Intel manual runs to 28 pages.
 *
 * This instruction even has its own Wikipedia entry. The Wikipedia entry
 * has been translated into 4 languages. I am not making this up!
 *
 * We could get funky here and identify ourselves as "GenuineLguest", but
 * instead we just use the real "cpuid" instruction. Then I pretty much turned
 * off feature bits until the Guest booted. (Don't say that: you'll damage
 * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
 * hardly future-proof.) No one's listening! They don't like you anyway,
 * parenthetic weirdo!
 *
 * Replacing cpuid so we can turn features off is great for the kernel, but
 * anyone (including userspace) can just use the raw "cpuid" instruction and
 * the Host won't even notice since it isn't privileged. So we try not to get
 * too worked up about it. */
static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	int function = *eax;

	native_cpuid(eax, ebx, ecx, edx);
	switch (function) {
	case 1:	/* Basic feature request. */
		/* We only allow the kernel to see SSE3, CMPXCHG16B and SSSE3 */
		*ecx &= 0x00002201;
		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, FPU. */
		*edx &= 0x07808101;
		/* The Host can do a nice optimization if it knows that the
		 * kernel mappings (addresses above 0xC0000000 or whatever
		 * PAGE_OFFSET is set to) haven't changed. But Linux calls
		 * flush_tlb_user() for both user and kernel mappings unless
		 * the Page Global Enable (PGE) feature bit is set. */
		*edx |= 0x00002000;
		break;
	case 0x80000000:
		/* Future-proof this a little: if they ask how much extended
		 * processor information there is, limit it to known fields. */
		if (*eax > 0x80000008)
			*eax = 0x80000008;
		break;
	}
}

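/* [Editorial aside, not part of the original patch: the magic masks above
 * are ordinary CPUID leaf-1 feature-bit positions. Rebuilding them from
 * individual bits (macro names hypothetical) shows which features survive:] */
#define EXAMPLE_ECX_MASK ((1U<<0) /* SSE3 */ | (1U<<9) /* SSSE3 */ | \
			  (1U<<13) /* CMPXCHG16B */)	/* == 0x00002201 */
#define EXAMPLE_EDX_MASK ((1U<<0) /* FPU */ | (1U<<8) /* CMPXCHG8B */ | \
			  (1U<<15) /* CMOV */ | (1U<<23) /* MMX */ | \
			  (1U<<24) /* FXSR */ | (1U<<25) /* SSE */ | \
			  (1U<<26) /* SSE2 */)		/* == 0x07808101 */
/* ...and the bit forced on in EDX is (1U<<13): PGE, == 0x00002000. */
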
/* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
 * it. The Host needs to know when the Guest wants to change them, so we have
 * a whole series of functions like read_cr0() and write_cr0().
 *
 * We start with CR0. CR0 allows you to turn on and off all kinds of basic
 * features, but Linux only really cares about one: the horrifically-named Task
 * Switched (TS) bit at bit 3 (ie. 8).
 *
 * What does the TS bit do? Well, it causes the CPU to trap (interrupt 7) if
 * the floating point unit is used. Which allows us to restore FPU state
 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
 * name like "FPUTRAP bit" be a little less cryptic?
 *
 * We store cr0 (and cr3) locally, because the Host never changes it. The
 * Guest sometimes wants to read it and we'd prefer not to bother the Host
 * unnecessarily. */
static unsigned long current_cr0, current_cr3;
static void lguest_write_cr0(unsigned long val)
{
	/* 8 == TS bit. */
	lazy_hcall(LHCALL_TS, val & 8, 0, 0);
	current_cr0 = val;
}

static unsigned long lguest_read_cr0(void)
{
	return current_cr0;
}

/* Intel provided a special instruction to clear the TS bit for people too cool
 * to use write_cr0() to do it. This "clts" instruction is faster, because all
 * the vowels have been optimized out. */
static void lguest_clts(void)
{
	lazy_hcall(LHCALL_TS, 0, 0, 0);
	current_cr0 &= ~8U;
}

/* CR2 is the virtual address of the last page fault, which the Guest only ever
 * reads. The Host kindly writes this into our "struct lguest_data", so we
 * just read it out of there. */
static unsigned long lguest_read_cr2(void)
{
	return lguest_data.cr2;
}

/* CR3 is the current toplevel pagetable page: the principle is the same as
 * cr0. Keep a local copy, and tell the Host when it changes. */
static void lguest_write_cr3(unsigned long cr3)
{
	lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
	current_cr3 = cr3;
}

static unsigned long lguest_read_cr3(void)
{
	return current_cr3;
}

/* CR4 is used to enable and disable PGE, but we don't care. */
static unsigned long lguest_read_cr4(void)
{
	return 0;
}

static void lguest_write_cr4(unsigned long val)
{
}

/*
 * Page Table Handling.
 *
 * Now would be a good time to take a rest and grab a coffee or similarly
 * relaxing stimulant. The easy parts are behind us, and the trek gradually
 * winds uphill from here.
 *
 * Quick refresher: memory is divided into "pages" of 4096 bytes each. The CPU
 * maps virtual addresses to physical addresses using "page tables". We could
 * use one huge index of 1 million entries: each address is 4 bytes, so that's
 * 1024 pages just to hold the page tables. But since most virtual addresses
 * are unused, we use a two level index which saves space. The CR3 register
 * contains the physical address of the top level "page directory" page, which
 * contains physical addresses of up to 1024 second-level pages. Each of these
 * second level pages contains up to 1024 physical addresses of actual pages,
 * or Page Table Entries (PTEs).
 *
 * Here's a diagram, where arrows indicate physical addresses:
 *
 * CR3 ---> +---------+
 *          |  --------->+---------+
 *          |         |  | PADDR1  |
 *        Top-level   |  | PADDR2  |
 *        (PMD) page  |  |         |
 *          |         |  Lower-level |
 *          |         |  (PTE) page  |
 *          |         |  |         |
 *          ....         ....
 *
 * So to convert a virtual address to a physical address, we look up the top
 * level, which points us to the second level, which gives us the physical
 * address of that page. If the top level entry was not present, or the second
 * level entry was not present, then the virtual address is invalid (we
 * say "the page was not mapped").
 *
 * Put another way, a 32-bit virtual address is divided up like so:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
 *    Index into top     Index into second     Offset within page
 *  page directory page    pagetable page
 *
 * The kernel spends a lot of time changing both the top-level page directory
 * and lower-level pagetable pages. The Guest doesn't know physical addresses,
 * so while it maintains these page tables exactly like normal, it also needs
 * to keep the Host informed whenever it makes a change: the Host will create
 * the real page tables based on the Guests'.
 */

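/* [Editorial sketch, not part of the original patch: the 10/10/12 split the
 * diagram above describes, written out as code with hypothetical helper
 * names. The constants assume the non-PAE two-level layout this Guest
 * requires (see the !X86_PAE dependency in the Kconfig):] */
static inline unsigned int example_pgd_index(unsigned long vaddr)
{
	return vaddr >> 22;		/* top 10 bits: page directory slot */
}
static inline unsigned int example_pte_index(unsigned long vaddr)
{
	return (vaddr >> 12) & 0x3ff;	/* middle 10 bits: PTE slot */
}
static inline unsigned int example_page_offset(unsigned long vaddr)
{
	return vaddr & 0xfff;		/* low 12 bits: offset within page */
}
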
/* The Guest calls this to set a second-level entry (pte), ie. to map a page
 * into a process' address space. We set the entry then tell the Host the
 * toplevel and address this corresponds to. The Guest uses one pagetable per
 * process, so we need to tell the Host which one we're changing (mm->pgd). */
static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
}

/* The Guest calls this to set a top-level entry. Again, we set the entry then
 * tell the Host which top-level page we changed, and the index of the entry we
 * changed. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
	lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
		   (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
}
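
/* [Editorial aside, not part of the original patch: the arithmetic above
 * splits the entry's address into (page, index). Each non-PAE entry is 4
 * bytes, so the byte offset within the page divided by 4 is the slot
 * number; the helper name is hypothetical:] */
static inline unsigned int example_pmd_slot(unsigned long pa_of_entry)
{
	return (pa_of_entry & (PAGE_SIZE - 1)) / 4;
}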

/* There are a couple of legacy places where the kernel sets a PTE, but we
 * don't know the top level any more. This is useless for us, since we don't
 * know which pagetable is changing or what address, so we just tell the Host
 * to forget all of them. Fortunately, this is very rare.
 *
 * ... except in early boot when the kernel sets up the initial pagetables,
 * which makes booting astonishingly slow. So we don't even tell the Host
 * anything changed until we've done the first page table switch.
 */
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	/* Don't bother with hypercall before initial setup. */
	if (current_cr3)
		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
}

/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations. On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
 *
 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
 * called when a valid entry is written, not when it's removed (ie. marked not
 * present). Instead, this is where we come when the Guest wants to remove a
 * page table entry: we tell the Host to set that entry to 0 (ie. the present
 * bit is zero). */
static void lguest_flush_tlb_single(unsigned long addr)
{
	/* Simply set it to zero: if it was not, it will fault back in. */
	lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
}

/* This is what happens after the Guest has removed a large number of entries.
 * This tells the Host that any of the page table entries for userspace might
 * have changed, ie. virtual addresses below PAGE_OFFSET. */
static void lguest_flush_tlb_user(void)
{
	lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
}

/* This is called when the kernel page tables have changed. That's not very
 * common (unless the Guest is using highmem, which makes the Guest extremely
 * slow), so it's worth separating this from the user flushing above. */
static void lguest_flush_tlb_kernel(void)
{
	lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
}

/*
 * The Unadvanced Programmable Interrupt Controller.
 *
 * This is an attempt to implement the simplest possible interrupt controller.
 * I spent some time looking through routines like set_irq_chip_and_handler,
 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
 * I *think* this is as simple as it gets.
 *
 * We can tell the Host which interrupts we want blocked using the
 * lguest_data.blocked_interrupts bitmap, so disabling (aka "masking") them is
 * as simple as setting a bit. We don't actually "ack" interrupts as such, we
 * just mask and unmask them. I wonder if we should be cleverer?
 */
static void disable_lguest_irq(unsigned int irq)
{
	set_bit(irq, lguest_data.blocked_interrupts);
}

static void enable_lguest_irq(unsigned int irq)
{
	clear_bit(irq, lguest_data.blocked_interrupts);
}

/* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = {
	.name		= "lguest",
	.mask		= disable_lguest_irq,
	.mask_ack	= disable_lguest_irq,
	.unmask		= enable_lguest_irq,
};

/* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller. */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = 0; i < LGUEST_IRQS; i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;
		if (vector != SYSCALL_VECTOR) {
			set_intr_gate(vector, interrupt[i]);
			set_irq_chip_and_handler(i, &lguest_irq_controller,
						 handle_level_irq);
		}
	}
	/* This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts. */
	irq_ctx_init(smp_processor_id());
}

/*
 * Time.
 *
 * It would be far better for everyone if the Guest had its own clock, but
 * until then the Host gives us the time on every interrupt.
 */
static unsigned long lguest_get_wallclock(void)
{
	return lguest_data.time.tv_sec;
}

static cycle_t lguest_clock_read(void)
{
	unsigned long sec, nsec;

	/* If the Host tells us the TSC speed, we can trust that. */
	if (lguest_data.tsc_khz)
		return native_read_tsc();

	/* If we can't use the TSC, we read the time value written by the Host.
	 * Since it's in two parts (seconds and nanoseconds), we risk reading
	 * it just as it's changing from 99 & 0.999999999 to 100 and 0, and
	 * getting 99 and 0. As Linux tends to come apart under the stress of
	 * time travel, we must be careful: */
	do {
		/* First we read the seconds part. */
		sec = lguest_data.time.tv_sec;
		/* This read memory barrier tells the compiler and the CPU that
		 * this can't be reordered: we have to complete the above
		 * before going on. */
		rmb();
		/* Now we read the nanoseconds part. */
		nsec = lguest_data.time.tv_nsec;
		/* Make sure we've done that. */
		rmb();
		/* Now if the seconds part has changed, try again. */
	} while (unlikely(lguest_data.time.tv_sec != sec));

	/* Our non-TSC clock is in real nanoseconds. */
	return sec*1000000000ULL + nsec;
}

/* This is what we tell the kernel is our clocksource. */
static struct clocksource lguest_clock = {
	.name		= "lguest",
	.rating		= 400,
	.read		= lguest_clock_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1 << 22,
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

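/* [Editorial aside, not part of the original patch: with .mult = 1 << 22 and
 * .shift = 22, the clocksource core's conversion, essentially
 * ns = (cycles * mult) >> shift, collapses to ns == cycles. That is exactly
 * right for the non-TSC path above, which already returns nanoseconds; when
 * the Host supplies tsc_khz, lguest_time_init() below recomputes mult:] */
static inline u64 example_cyc2ns_identity(u64 cycles)
{
	return (cycles * (1ULL << 22)) >> 22;	/* == cycles */
}
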
685/* The "scheduler clock" is just our real clock, adjusted to start at zero */
686static unsigned long long lguest_sched_clock(void)
687{
688 return cyc2ns(&lguest_clock, lguest_clock_read() - clock_base);
689}
690
691/* We also need a "struct clock_event_device": Linux asks us to set it to go
692 * off some time in the future. Actually, James Morris figured all this out, I
693 * just applied the patch. */
694static int lguest_clockevent_set_next_event(unsigned long delta,
695 struct clock_event_device *evt)
696{
697 if (delta < LG_CLOCK_MIN_DELTA) {
698 if (printk_ratelimit())
699 printk(KERN_DEBUG "%s: small delta %lu ns\n",
700 __FUNCTION__, delta);
701 return -ETIME;
702 }
703 hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0);
704 return 0;
705}
706
707static void lguest_clockevent_set_mode(enum clock_event_mode mode,
708 struct clock_event_device *evt)
709{
710 switch (mode) {
711 case CLOCK_EVT_MODE_UNUSED:
712 case CLOCK_EVT_MODE_SHUTDOWN:
713 /* A 0 argument shuts the clock down. */
714 hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0);
715 break;
716 case CLOCK_EVT_MODE_ONESHOT:
717 /* This is what we expect. */
718 break;
719 case CLOCK_EVT_MODE_PERIODIC:
720 BUG();
721 case CLOCK_EVT_MODE_RESUME:
722 break;
723 }
724}
725
726/* This describes our primitive timer chip. */
727static struct clock_event_device lguest_clockevent = {
728 .name = "lguest",
729 .features = CLOCK_EVT_FEAT_ONESHOT,
730 .set_next_event = lguest_clockevent_set_next_event,
731 .set_mode = lguest_clockevent_set_mode,
732 .rating = INT_MAX,
733 .mult = 1,
734 .shift = 0,
735 .min_delta_ns = LG_CLOCK_MIN_DELTA,
736 .max_delta_ns = LG_CLOCK_MAX_DELTA,
737};
738
739/* This is the Guest timer interrupt handler (hardware interrupt 0). We just
740 * call the clockevent infrastructure and it does whatever needs doing. */
741static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
742{
743 unsigned long flags;
744
745 /* Don't interrupt us while this is running. */
746 local_irq_save(flags);
747 lguest_clockevent.event_handler(&lguest_clockevent);
748 local_irq_restore(flags);
749}
750
751/* At some point in the boot process, we get asked to set up our timing
752 * infrastructure. The kernel doesn't expect timer interrupts before this, but
753 * we cleverly initialized the "blocked_interrupts" field of "struct
754 * lguest_data" so that timer interrupts were blocked until now. */
755static void lguest_time_init(void)
756{
757 /* Set up the timer interrupt (0) to go to our simple timer routine */
758 set_irq_handler(0, lguest_time_irq);
759
760 /* Our clock structure look like arch/i386/kernel/tsc.c if we can use
761 * the TSC, otherwise it's a dumb nanosecond-resolution clock. Either
762 * way, the "rating" is initialized so high that it's always chosen
763 * over any other clocksource. */
764 if (lguest_data.tsc_khz)
765 lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz,
766 lguest_clock.shift);
767 clock_base = lguest_clock_read();
768 clocksource_register(&lguest_clock);
769
770 /* Now we've set up our clock, we can use it as the scheduler clock */
771 pv_time_ops.sched_clock = lguest_sched_clock;
772
773 /* We can't set cpumask in the initializer: damn C limitations! Set it
774 * here and register our timer device. */
775 lguest_clockevent.cpumask = cpumask_of_cpu(0);
776 clockevents_register_device(&lguest_clockevent);
777
778 /* Finally, we unblock the timer interrupt. */
779 enable_lguest_irq(0);
780}
781
/*
 * Miscellaneous bits and pieces.
 *
 * Here is an oddball collection of functions which the Guest needs for things
 * to work. They're pretty simple.
 */

/* The Guest needs to tell the Host what stack it expects traps to use. For
 * native hardware, this is part of the Task State Segment mentioned above in
 * lguest_load_tr_desc(), but to help hypervisors there's this special call.
 *
 * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
 * segment), the privilege level (we're privilege level 1, the Host is 0 and
 * will not tolerate us trying to use that), the stack pointer, and the number
 * of pages in the stack. */
static void lguest_load_esp0(struct tss_struct *tss,
			     struct thread_struct *thread)
{
	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->esp0,
		   THREAD_SIZE/PAGE_SIZE);
}

/* Let's just say, I wouldn't do debugging under a Guest. */
static void lguest_set_debugreg(int regno, unsigned long value)
{
	/* FIXME: Implement */
}

/* There are times when the kernel wants to make sure that no memory writes are
 * caught in the cache (that they've all reached real hardware devices). This
 * doesn't matter for the Guest which has virtual hardware.
 *
 * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
 * (clflush) instruction is available and the kernel uses that. Otherwise, it
 * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
 * Unlike clflush, wbinvd can only be run at privilege level 0. So we can
 * ignore clflush, but replace wbinvd.
 */
static void lguest_wbinvd(void)
{
}

/* If the Guest expects to have an Advanced Programmable Interrupt Controller,
 * we play dumb by ignoring writes and returning 0 for reads. So it's no
 * longer Programmable nor Controlling anything, and I don't think 8 lines of
 * code qualifies for Advanced. It will also never interrupt anything. It
 * does, however, allow us to get through the Linux boot code. */
#ifdef CONFIG_X86_LOCAL_APIC
static void lguest_apic_write(unsigned long reg, unsigned long v)
{
}

static unsigned long lguest_apic_read(unsigned long reg)
{
	return 0;
}
#endif

/* STOP! Until an interrupt comes in. */
static void lguest_safe_halt(void)
{
	hcall(LHCALL_HALT, 0, 0, 0);
}

/* Perhaps CRASH isn't the best name for this hypercall, but we use it to get
 * a message out when we're crashing as well as for elegant termination like
 * powering off.
 *
 * Note that the Host always prefers that the Guest speak in physical addresses
 * rather than virtual addresses, so we use __pa() here. */
static void lguest_power_off(void)
{
	hcall(LHCALL_CRASH, __pa("Power down"), 0, 0);
}

/*
 * Panicking.
 *
 * Don't. But if you did, this is what happens.
 */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
	hcall(LHCALL_CRASH, __pa(p), 0, 0);
	/* The hcall won't return, but to keep gcc happy, we're "done". */
	return NOTIFY_DONE;
}

static struct notifier_block paniced = {
	.notifier_call = lguest_panic
};

/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
	/* We do this here and not earlier because lockcheck barfs if we do it
	 * before start_kernel() */
	atomic_notifier_chain_register(&panic_notifier_list, &paniced);

	/* The Linux bootloader header contains an "e820" memory map: the
	 * Launcher populated the first entry with our memory limit. */
	add_memory_region(boot_params.e820_map[0].addr,
			  boot_params.e820_map[0].size,
			  boot_params.e820_map[0].type);

	/* This string is for the boot messages. */
	return "LGUEST";
}

/*G:050
 * Patching (Powerfully Placating Performance Pedants)
 *
 * We have already seen that pv_ops structures let us replace simple
 * native instructions with calls to the appropriate back end all throughout
 * the kernel. This allows the same kernel to run as a Guest and as a native
 * kernel, but it's slow because of all the indirect branches.
 *
 * Remember that David Wheeler quote about "Any problem in computer science can
 * be solved with another layer of indirection"? The rest of that quote is
 * "... But that usually will create another problem." This is the first of
 * those problems.
 *
 * Our current solution is to allow the paravirt back end to optionally patch
 * over the indirect calls to replace them with something more efficient. We
 * patch the four most commonly called functions: disable interrupts, enable
 * interrupts, restore interrupts and save interrupts. We usually have 10
 * bytes to patch into: the Guest versions of these operations are small enough
 * that we can fit comfortably.
 *
 * First we need assembly templates of each of the patchable Guest operations,
 * and these are in i386_head.S. */

/*G:060 We construct a table from the assembler templates: */
static const struct lguest_insns
{
	const char *start, *end;
} lguest_insns[] = {
	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
	[PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
	[PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
};

/* Now our patch routine is fairly simple (based on the native one in
 * paravirt.c). If we have a replacement, we copy it in and return how much of
 * the available space we used. */
static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
			     unsigned long addr, unsigned len)
{
	unsigned int insn_len;

	/* Don't do anything special if we don't have a replacement */
	if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	insn_len = lguest_insns[type].end - lguest_insns[type].start;

	/* Similarly if we can't fit the replacement (shouldn't happen, but
	 * let's be thorough). */
	if (len < insn_len)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	/* Copy in our instructions. */
	memcpy(ibuf, lguest_insns[type].start, insn_len);
	return insn_len;
}

/*G:030 Once we get to lguest_init(), we know we're a Guest. The pv_ops
 * structures in the kernel provide points for (almost) every routine we have
 * to override to avoid privileged instructions. */
__init void lguest_init(void *boot)
{
	/* Copy boot parameters first: the Launcher put the physical location
	 * in %esi, and head.S converted that to a virtual address and handed
	 * it to us. We use "__memcpy" because "memcpy" sometimes tries to do
	 * tricky things to go faster, and we're not ready for that. */
	__memcpy(&boot_params, boot, PARAM_SIZE);
	/* The boot parameters also tell us where the command-line is: save
	 * that, too. */
	__memcpy(boot_command_line, __va(boot_params.hdr.cmd_line_ptr),
		 COMMAND_LINE_SIZE);

	/* We're under lguest, paravirt is enabled, and we're running at
	 * privilege level 1, not 0 as normal. */
	pv_info.name = "lguest";
	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = 1;

	/* We set up all the lguest overrides for sensitive operations. These
	 * are detailed with the operations themselves. */

	/* interrupt-related operations */
	pv_irq_ops.init_IRQ = lguest_init_IRQ;
	pv_irq_ops.save_fl = save_fl;
	pv_irq_ops.restore_fl = restore_fl;
	pv_irq_ops.irq_disable = irq_disable;
	pv_irq_ops.irq_enable = irq_enable;
	pv_irq_ops.safe_halt = lguest_safe_halt;

	/* init-time operations */
	pv_init_ops.memory_setup = lguest_memory_setup;
	pv_init_ops.patch = lguest_patch;

	/* Intercepts of various cpu instructions */
	pv_cpu_ops.load_gdt = lguest_load_gdt;
	pv_cpu_ops.cpuid = lguest_cpuid;
	pv_cpu_ops.load_idt = lguest_load_idt;
	pv_cpu_ops.iret = lguest_iret;
	pv_cpu_ops.load_esp0 = lguest_load_esp0;
	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
	pv_cpu_ops.set_ldt = lguest_set_ldt;
	pv_cpu_ops.load_tls = lguest_load_tls;
	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
	pv_cpu_ops.clts = lguest_clts;
	pv_cpu_ops.read_cr0 = lguest_read_cr0;
	pv_cpu_ops.write_cr0 = lguest_write_cr0;
	pv_cpu_ops.read_cr4 = lguest_read_cr4;
	pv_cpu_ops.write_cr4 = lguest_write_cr4;
	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
	pv_cpu_ops.wbinvd = lguest_wbinvd;
	pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
	pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;

	/* pagetable management */
	pv_mmu_ops.write_cr3 = lguest_write_cr3;
	pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
	pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
	pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
	pv_mmu_ops.set_pte = lguest_set_pte;
	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
	pv_mmu_ops.set_pmd = lguest_set_pmd;
	pv_mmu_ops.read_cr2 = lguest_read_cr2;
	pv_mmu_ops.read_cr3 = lguest_read_cr3;
	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;

#ifdef CONFIG_X86_LOCAL_APIC
	/* apic read/write intercepts */
	pv_apic_ops.apic_write = lguest_apic_write;
	pv_apic_ops.apic_write_atomic = lguest_apic_write;
	pv_apic_ops.apic_read = lguest_apic_read;
#endif

	/* time operations */
	pv_time_ops.get_wallclock = lguest_get_wallclock;
	pv_time_ops.time_init = lguest_time_init;

	/* Now is a good time to look at the implementations of these functions
	 * before returning to the rest of lguest_init(). */

	/*G:070 Now we've seen all the paravirt_ops, we return to
	 * lguest_init() where the rest of the fairly chaotic boot setup
	 * occurs.
	 *
	 * The Host expects our first hypercall to tell it where our "struct
	 * lguest_data" is, so we do that first. */
	hcall(LHCALL_LGUEST_INIT, __pa(&lguest_data), 0, 0);

	/* The native boot code sets up initial page tables immediately after
	 * the kernel itself, and sets init_pg_tables_end so they're not
	 * clobbered. The Launcher places our initial pagetables somewhere at
	 * the top of our physical memory, so we don't need extra space: set
	 * init_pg_tables_end to the end of the kernel. */
	init_pg_tables_end = __pa(pg0);

	/* Load the %fs segment register (the per-cpu segment register) with
	 * the normal data segment to get through booting. */
	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");

	/* Clear the part of the kernel data which is expected to be zero.
	 * Normally it will be anyway, but if we're loading from a bzImage with
	 * CONFIG_RELOCATABLE=y, the relocations will be sitting here. */
	memset(__bss_start, 0, __bss_stop - __bss_start);

	/* The Host uses the top of the Guest's virtual address space for the
	 * Host<->Guest Switcher, and it tells us how much it needs in
	 * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */
	reserve_top_address(lguest_data.reserve_mem);

	/* If we don't initialize the lock dependency checker now, it crashes
	 * paravirt_disable_iospace. */
	lockdep_init();

	/* The IDE code spends about 3 seconds probing for disks: if we reserve
	 * all the I/O ports up front it can't get them and so doesn't probe.
	 * Other device drivers are similar (but less severe). This cuts the
	 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */
	paravirt_disable_iospace();

	/* This is messy CPU setup stuff which the native boot code does before
	 * start_kernel, so we have to do it, too: */
	cpu_detect(&new_cpu_data);
	/* head.S usually sets up the first capability word, so do it here. */
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Math is always hard! */
	new_cpu_data.hard_math = 1;

#ifdef CONFIG_X86_MCE
	mce_disabled = 1;
#endif
#ifdef CONFIG_ACPI
	acpi_disabled = 1;
	acpi_ht = 0;
#endif

	/* We set the preferred console to "hvc". This is the "hypervisor
	 * virtual console" driver written by the PowerPC people, which we also
	 * adapted for lguest's use. */
	add_preferred_console("hvc", 0, NULL);

	/* Last of all, we set the power management poweroff hook to point to
	 * the Guest routine to power off. */
	pm_power_off = lguest_power_off;

	/* Now we're set up, call start_kernel() in init/main.c and we proceed
	 * to boot as normal. It never returns. */
	start_kernel();
}
/*
 * This marks the end of stage II of our journey, The Guest.
 *
 * It is now time for us to explore the nooks and crannies of the three Guest
 * devices and complete our understanding of the Guest in "make Drivers".
 */
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
new file mode 100644
index 000000000000..6d7a74f07c41
--- /dev/null
+++ b/arch/x86/lguest/i386_head.S
@@ -0,0 +1,93 @@
#include <linux/linkage.h>
#include <linux/lguest.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/processor-flags.h>

/*G:020 This is where we begin: we have a magic signature which the Launcher
 * looks for. The plan is that the Linux boot protocol will be extended with a
 * "platform type" field which will guide us here from the normal entry point,
 * but for the moment this suffices. The normal boot code uses %esi for the
 * boot header, so we do too. We convert it to a virtual address by adding
 * PAGE_OFFSET, and hand it to lguest_init() as its argument (ie. %eax).
 *
 * The .section line puts this code in .init.text so it will be discarded after
 * boot. */
.section .init.text, "ax", @progbits
.ascii "GenuineLguest"
	/* Set up initial stack. */
	movl $(init_thread_union+THREAD_SIZE),%esp
	movl %esi, %eax
	addl $__PAGE_OFFSET, %eax
	jmp lguest_init

/*G:055 We create a macro which puts the assembler code between lgstart_ and
 * lgend_ markers. These templates are put in the .text section: they can't be
 * discarded after boot as we may need to patch modules, too. */
.text
#define LGUEST_PATCH(name, insns...)			\
	lgstart_##name:	insns;	lgend_##name:;		\
	.globl lgstart_##name;	.globl lgend_##name

LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
/*:*/

/* These demarcate the EIP range where the Host should never deliver
 * interrupts. */
.global lguest_noirq_start
.global lguest_noirq_end

/*M:004 When the Host reflects a trap or injects an interrupt into the Guest,
 * it sets the eflags interrupt bit on the stack based on
 * lguest_data.irq_enabled, so the Guest iret logic does the right thing when
 * restoring it. However, when the Host sets the Guest up for direct traps,
 * such as system calls, the processor is the one to push eflags onto the
 * stack, and the interrupt bit will be 1 (in reality, interrupts are always
 * enabled in the Guest).
 *
 * This turns out to be harmless: the only trap which should happen under Linux
 * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc
 * regions), which has to be reflected through the Host anyway. If another
 * trap *does* go off when interrupts are disabled, the Guest will panic, and
 * we'll never get to this iret! :*/

/*G:045 There is one final paravirt_op that the Guest implements, and glancing
 * at it you can see why I left it to last. It's *cool*! It's in *assembler*!
 *
 * The "iret" instruction is used to return from an interrupt or trap. The
 * stack looks like this:
 *   old address
 *   old code segment & privilege level
 *   old processor flags ("eflags")
 *
 * The "iret" instruction pops those values off the stack and restores them all
 * at once. The only problem is that eflags includes the Interrupt Flag which
 * the Guest can't change: the CPU will simply ignore it when we do an "iret".
 * So we have to copy eflags from the stack to lguest_data.irq_enabled before
 * we do the "iret".
 *
 * There are two problems with this: firstly, we need to use a register to do
 * the copy and secondly, the whole thing needs to be atomic. The first
 * problem is easy to solve: push %eax on the stack so we can use it, and then
 * restore it at the end just before the real "iret".
 *
 * The second is harder: copying eflags to lguest_data.irq_enabled will turn
 * interrupts on before we're finished, so we could be interrupted before we
 * return to userspace or wherever. Our solution to this is to surround the
 * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the
 * Host that it is *never* to interrupt us there, even if interrupts seem to be
 * enabled. */
ENTRY(lguest_iret)
	pushl %eax
	movl 12(%esp), %eax
lguest_noirq_start:
	/* Note the %ss: segment prefix here. Normal data accesses use the
	 * "ds" segment, but that will have already been restored for whatever
	 * we're returning to (such as userspace): we can't trust it. The %ss:
	 * prefix makes sure we use the stack segment, which is still valid. */
	movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
	popl %eax
	iret
lguest_noirq_end: