Diffstat (limited to 'drivers/lguest/lguest.c')
-rw-r--r--  drivers/lguest/lguest.c  535
1 file changed, 501 insertions(+), 34 deletions(-)
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
index 18dade06d4a9..1bc1546c7fd0 100644
--- a/drivers/lguest/lguest.c
+++ b/drivers/lguest/lguest.c
@@ -1,6 +1,32 @@
-/*
- * Lguest specific paravirt-ops implementation
+/*P:010
+ * A hypervisor allows multiple Operating Systems to run on a single machine.
+ * To quote David Wheeler: "Any problem in computer science can be solved with
+ * another layer of indirection."
+ *
+ * We keep things simple in two ways. First, we start with a normal Linux
+ * kernel and insert a module (lg.ko) which allows us to run other Linux
+ * kernels the same way we'd run processes. We call the first kernel the Host,
+ * and the others the Guests. The program which sets up and configures Guests
+ * (such as the example in Documentation/lguest/lguest.c) is called the
+ * Launcher.
+ *
+ * Secondly, we only run specially modified Guests, not normal kernels. When
+ * you set CONFIG_LGUEST to 'y' or 'm', this automatically sets
+ * CONFIG_LGUEST_GUEST=y, which compiles this file into the kernel so it knows
+ * how to be a Guest. This means that you can use the same kernel you boot
+ * normally (ie. as a Host) as a Guest.
  *
+ * These Guests know that they cannot do privileged operations, such as disable
+ * interrupts, and that they have to ask the Host to do such things explicitly.
+ * This file consists of all the replacements for such low-level native
+ * hardware operations: these special Guest versions call the Host.
+ *
+ * So how does the kernel know it's a Guest? The Guest starts at a special
+ * entry point marked with a magic string, which sets up a few things then
+ * calls here. We replace the native functions in "struct paravirt_ops"
+ * with our Guest versions, then boot like normal. :*/
+
+/*
  * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -40,6 +66,12 @@
 #include <asm/mce.h>
 #include <asm/io.h>
 
+/*G:010 Welcome to the Guest!
+ *
+ * The Guest in our tale is a simple creature: identical to the Host but
+ * behaving in simplified but equivalent ways. In particular, the Guest is the
+ * same kernel as the Host (or at least, built from the same source code). :*/
+
 /* Declarations for definitions in lguest_guest.S */
 extern char lguest_noirq_start[], lguest_noirq_end[];
 extern const char lgstart_cli[], lgend_cli[];
@@ -58,7 +90,26 @@ struct lguest_data lguest_data = {
 struct lguest_device_desc *lguest_devices;
 static cycle_t clock_base;
 
-static enum paravirt_lazy_mode lazy_mode;
+/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
+ * real optimization trick!
+ *
+ * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
+ * them as a batch when lazy_mode is eventually turned off. Because hypercalls
+ * are reasonably expensive, batching them up makes sense. For example, a
+ * large mmap might update dozens of page table entries: that code calls
+ * lguest_lazy_mode(PARAVIRT_LAZY_MMU), does the dozen updates, then calls
+ * lguest_lazy_mode(PARAVIRT_LAZY_NONE).
+ *
+ * So, when we're in lazy mode, we call async_hypercall() to store the call for
+ * future processing. When lazy mode is turned off we issue a hypercall to
+ * flush the stored calls.
+ *
+ * There's also a hack where "mode" is set to "PARAVIRT_LAZY_FLUSH" which
+ * indicates we're to flush any outstanding calls immediately. This is used
+ * when an interrupt handler does a kmap_atomic(): the page table changes must
+ * happen immediately even if we're in the middle of a batch. Usually we're
+ * not, though, so there's nothing to do. */
+static enum paravirt_lazy_mode lazy_mode; /* Note: not SMP-safe! */
 static void lguest_lazy_mode(enum paravirt_lazy_mode mode)
 {
 	if (mode == PARAVIRT_LAZY_FLUSH) {
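The batching described in G:035 above can be modeled in a few lines of ordinary C. What follows is a hedged, standalone userspace sketch of the idea only; every name in it is invented for illustration, and it is not the lguest code:

    #include <stdio.h>

    #define BATCH_MAX 8

    struct pending { int call, arg1, arg2, arg3; };
    static struct pending batch[BATCH_MAX];
    static int batched, lazy;

    static void do_hypercall(int call, int a1, int a2, int a3)
    {
        printf("hypercall %d(%d, %d, %d)\n", call, a1, a2, a3);
    }

    static void flush_batch(void)
    {
        for (int i = 0; i < batched; i++)
            do_hypercall(batch[i].call, batch[i].arg1,
                         batch[i].arg2, batch[i].arg3);
        batched = 0;
    }

    /* Like lazy_hcall(): defer when lazy, otherwise call straight away. */
    static void maybe_lazy_call(int call, int a1, int a2, int a3)
    {
        if (!lazy || batched == BATCH_MAX) {
            flush_batch();
            do_hypercall(call, a1, a2, a3);
        } else {
            batch[batched++] = (struct pending){ call, a1, a2, a3 };
        }
    }

    int main(void)
    {
        lazy = 1;                      /* enter PARAVIRT_LAZY_MMU, say      */
        maybe_lazy_call(1, 10, 0, 0);  /* a dozen page-table updates...     */
        maybe_lazy_call(1, 11, 0, 0);
        lazy = 0;                      /* leaving lazy mode...              */
        flush_batch();                 /* ...flushes the stored calls       */
        return 0;
    }

The win is that the expensive operation (a real hypercall) happens once per batch instead of once per update.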
@@ -82,6 +133,16 @@ static void lazy_hcall(unsigned long call,
 		async_hcall(call, arg1, arg2, arg3);
 }
 
+/* async_hcall() is pretty simple: I'm quite proud of it really. We have a
+ * ring buffer of stored hypercalls which the Host will run through next time
+ * we do a normal hypercall. Each entry in the ring has 4 slots for the
+ * hypercall arguments, and a "hcall_status" word which is 0 if the call is
+ * ready to go, and 255 once the Host has finished with it.
+ *
+ * If we come around to a slot which hasn't been finished, then the table is
+ * full and we just make the hypercall directly. This has the nice side
+ * effect of causing the Host to run all the stored calls in the ring buffer
+ * which empties it for next time! */
 void async_hcall(unsigned long call,
 		 unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
@@ -89,6 +150,9 @@ void async_hcall(unsigned long call,
 	static unsigned int next_call;
 	unsigned long flags;
 
+	/* Disable interrupts if not already disabled: we don't want an
+	 * interrupt handler making a hypercall while we're already doing
+	 * one! */
 	local_irq_save(flags);
 	if (lguest_data.hcall_status[next_call] != 0xFF) {
 		/* Table full, so do normal hcall which will flush table. */
@@ -98,7 +162,7 @@ void async_hcall(unsigned long call,
 		lguest_data.hcalls[next_call].edx = arg1;
 		lguest_data.hcalls[next_call].ebx = arg2;
 		lguest_data.hcalls[next_call].ecx = arg3;
-		/* Make sure host sees arguments before "valid" flag. */
+		/* Arguments must all be written before we mark it to go */
 		wmb();
 		lguest_data.hcall_status[next_call] = 0;
 		if (++next_call == LHCALL_RING_SIZE)
@@ -106,9 +170,14 @@ void async_hcall(unsigned long call,
 	}
 	local_irq_restore(flags);
 }
+/*:*/
 
+/* Wrappers for the SEND_DMA and BIND_DMA hypercalls. This is mainly because
+ * Jeff Garzik complained that __pa() should never appear in drivers, and this
+ * helps remove most of them. But also, it wraps some ugliness. */
 void lguest_send_dma(unsigned long key, struct lguest_dma *dma)
 {
+	/* The hcall might not write this if something goes wrong */
 	dma->used_len = 0;
 	hcall(LHCALL_SEND_DMA, key, __pa(dma), 0);
 }
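The hcall_status handshake above (write the arguments, barrier, then flip the status word) is easy to model. Below is a hedged, single-threaded userspace sketch of the same protocol; the names and the 4-entry ring are invented, and the real code issues wmb() where the comment says so:

    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 4

    static unsigned char status[RING_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF };
    static unsigned long ring[RING_SIZE][4];
    static unsigned int next_slot;

    /* What the Host does on any real hypercall: drain every ready slot. */
    static void host_drain(void)
    {
        for (int i = 0; i < RING_SIZE; i++)
            if (status[i] == 0) {
                printf("host runs stored call %lu\n", ring[i][0]);
                status[i] = 0xFF;        /* 255: Host has finished with it */
            }
    }

    static void queue_call(unsigned long call, unsigned long a1,
                           unsigned long a2, unsigned long a3)
    {
        if (status[next_slot] != 0xFF) {
            /* Ring full: a direct hypercall drains it as a side effect. */
            host_drain();
        }
        unsigned long args[4] = { call, a1, a2, a3 };
        memcpy(ring[next_slot], args, sizeof(args));
        /* The real code has wmb() here: arguments land before the flag. */
        status[next_slot] = 0;           /* 0: ready for the Host */
        if (++next_slot == RING_SIZE)
            next_slot = 0;
    }

    int main(void)
    {
        for (unsigned long i = 1; i <= 6; i++)
            queue_call(i, 0, 0, 0);      /* the 5th call forces a drain */
        host_drain();                    /* final flush */
        return 0;
    }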
@@ -116,11 +185,16 @@ void lguest_send_dma(unsigned long key, struct lguest_dma *dma)
 int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas,
 		    unsigned int num, u8 irq)
 {
+	/* This is the only hypercall which actually wants 5 arguments, and we
+	 * only support 4. Fortunately the interrupt number is always less
+	 * than 256, so we can pack it with the number of dmas in the final
+	 * argument. */
 	if (!hcall(LHCALL_BIND_DMA, key, __pa(dmas), (num << 8) | irq))
 		return -ENOMEM;
 	return 0;
 }
 
+/* Unbinding is the same hypercall as binding, but with 0 num & irq. */
 void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas)
 {
 	hcall(LHCALL_BIND_DMA, key, __pa(dmas), 0);
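The (num << 8) | irq packing works because the irq always fits in the low byte. A minimal sketch of the pack/unpack round trip (helper names invented for illustration):

    #include <assert.h>
    #include <stdio.h>

    /* Guest side: squeeze two values into one hypercall argument. */
    static unsigned long pack_num_irq(unsigned int num, unsigned char irq)
    {
        return (num << 8) | irq;
    }

    /* Host side: recover both halves. */
    static void unpack_num_irq(unsigned long arg,
                               unsigned int *num, unsigned char *irq)
    {
        *num = arg >> 8;
        *irq = arg & 0xFF;
    }

    int main(void)
    {
        unsigned int num;
        unsigned char irq;

        unpack_num_irq(pack_num_irq(16, 9), &num, &irq);
        assert(num == 16 && irq == 9);
        printf("num=%u irq=%u\n", num, irq);
        return 0;
    }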
@@ -138,35 +212,73 @@ void lguest_unmap(void *addr)
 	iounmap((__force void __iomem *)addr);
 }
 
+/*G:033
+ * Here are our first native-instruction replacements: four functions for
+ * interrupt control.
+ *
+ * The simplest way of implementing these would be to have "turn interrupts
+ * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow:
+ * these are by far the most commonly called functions of those we override.
+ *
+ * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
+ * which the Guest can update with a single instruction. The Host knows to
+ * check there when it wants to deliver an interrupt.
+ */
+
+/* save_flags() is expected to return the processor state (ie. "eflags"). The
+ * eflags word contains all kinds of stuff, but in practice Linux only cares
+ * about the interrupt flag. Our "save_flags()" just returns that. */
 static unsigned long save_fl(void)
 {
 	return lguest_data.irq_enabled;
 }
 
+/* "restore_flags" just sets the flags back to the value given. */
 static void restore_fl(unsigned long flags)
 {
-	/* FIXME: Check if interrupt pending... */
 	lguest_data.irq_enabled = flags;
 }
 
+/* Interrupts go off... */
 static void irq_disable(void)
 {
 	lguest_data.irq_enabled = 0;
 }
 
+/* Interrupts go on... */
 static void irq_enable(void)
 {
-	/* FIXME: Check if interrupt pending... */
 	lguest_data.irq_enabled = X86_EFLAGS_IF;
 }
-
+/*:*/
+/*M:003 Note that we don't check for outstanding interrupts when we re-enable
+ * them (or when we unmask an interrupt). This seems to work for the moment,
+ * since interrupts are rare and we'll just get the interrupt on the next timer
+ * tick, but when we turn on CONFIG_NO_HZ, we should revisit this. One way
+ * would be to put the "irq_enabled" field in a page by itself, and have the
+ * Host write-protect it when an interrupt comes in when irqs are disabled.
+ * There will then be a page fault as soon as interrupts are re-enabled. :*/
+
+/*G:034
+ * The Interrupt Descriptor Table (IDT).
+ *
+ * The IDT tells the processor what to do when an interrupt comes in. Each
+ * entry in the table is a 64-bit descriptor: this holds the privilege level,
+ * address of the handler, and... well, who cares? The Guest just asks the
+ * Host to make the change anyway, because the Host controls the real IDT.
+ */
 static void lguest_write_idt_entry(struct desc_struct *dt,
 				   int entrynum, u32 low, u32 high)
 {
+	/* Keep the local copy up to date. */
 	write_dt_entry(dt, entrynum, low, high);
+	/* Tell Host about this new entry. */
 	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, low, high);
 }
 
+/* Changing to a different IDT is very rare: we keep the IDT up-to-date every
+ * time it is written, so we can simply loop through all entries and tell the
+ * Host about them. */
 static void lguest_load_idt(const struct Xgt_desc_struct *desc)
 {
 	unsigned int i;
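The cost argument in G:033 is that toggling a word in shared memory is far cheaper than a hypercall. A hedged userspace sketch of the idea (names invented; the real Host-side check lives in the lg.ko switcher, not here):

    #include <stdio.h>

    #define EFLAGS_IF 0x200                 /* the x86 interrupt-enable bit */

    static unsigned long irq_enabled = EFLAGS_IF;  /* shared with the "Host" */

    /* Guest side: one plain store each, no hypercall. */
    static void guest_irq_disable(void) { irq_enabled = 0; }
    static void guest_irq_enable(void)  { irq_enabled = EFLAGS_IF; }

    /* Host side: consult the shared word before delivering an interrupt. */
    static void host_try_deliver(int irq)
    {
        if (irq_enabled & EFLAGS_IF)
            printf("deliver irq %d\n", irq);
        else
            printf("irq %d stays pending\n", irq);
    }

    int main(void)
    {
        guest_irq_disable();
        host_try_deliver(0);   /* stays pending */
        guest_irq_enable();
        host_try_deliver(0);   /* delivered */
        return 0;
    }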
@@ -176,12 +288,29 @@ static void lguest_load_idt(const struct Xgt_desc_struct *desc)
 		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
 }
 
+/*
+ * The Global Descriptor Table.
+ *
+ * The Intel architecture defines another table, called the Global Descriptor
+ * Table (GDT). You tell the CPU where it is (and its size) using the "lgdt"
+ * instruction, and then several other instructions refer to entries in the
+ * table. There are three entries which the Switcher needs, so the Host simply
+ * controls the entire thing and the Guest asks it to make changes using the
+ * LOAD_GDT hypercall.
+ *
+ * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
+ * hypercall and use that repeatedly to load a new IDT. I don't think it
+ * really matters, but wouldn't it be nice if they were the same?
+ */
 static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
 {
 	BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
 	hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
 }
 
+/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
+ * then tell the Host to reload the entire thing. This operation is so rare
+ * that this naive implementation is reasonable. */
 static void lguest_write_gdt_entry(struct desc_struct *dt,
 				   int entrynum, u32 low, u32 high)
 {
@@ -189,19 +318,58 @@ static void lguest_write_gdt_entry(struct desc_struct *dt,
 	hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
 }
 
+/* OK, I lied. There are three "thread local storage" GDT entries which change
+ * on every context switch (these three entries are how glibc implements
+ * __thread variables). So we have a hypercall specifically for this case. */
 static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
 {
 	lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
 }
+/*:*/
 
+/*G:038 That's enough excitement for now, back to ploughing through each of
+ * the paravirt_ops (we're about 1/3 of the way through).
+ *
+ * This is the Local Descriptor Table, another weird Intel thingy. Linux only
+ * uses this for some strange applications like Wine. We don't do anything
+ * here, so they'll get an informative and friendly Segmentation Fault. */
 static void lguest_set_ldt(const void *addr, unsigned entries)
 {
 }
 
+/* This loads a GDT entry into the "Task Register": that entry points to a
+ * structure called the Task State Segment. Some comments scattered through
+ * the kernel code indicate that this was used for task switching in ages
+ * past, along with blood sacrifice and astrology.
+ *
+ * Now there's nothing interesting in here that we don't get told elsewhere.
+ * But the native version uses the "ltr" instruction, which makes the Host
+ * complain to the Guest about a Segmentation Fault and it'll oops. So we
+ * override the native version with a do-nothing version. */
 static void lguest_load_tr_desc(void)
 {
 }
 
+/* The "cpuid" instruction is a way of querying both the CPU identity
+ * (manufacturer, model, etc.) and its features. It was introduced before the
+ * Pentium in 1993 and keeps getting extended by both Intel and AMD. As you
+ * might imagine, after a decade and a half of this treatment, it is now a
+ * giant ball of hair. Its entry in the current Intel manual runs to 28 pages.
+ *
+ * This instruction even has its own Wikipedia entry. The Wikipedia entry
+ * has been translated into 4 languages. I am not making this up!
+ *
+ * We could get funky here and identify ourselves as "GenuineLguest", but
+ * instead we just use the real "cpuid" instruction. Then I pretty much turned
+ * off feature bits until the Guest booted. (Don't say that: you'll damage
+ * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
+ * hardly future proof.) No one's listening! They don't like you anyway,
+ * parenthetic weirdo!
+ *
+ * Replacing the cpuid so we can turn features off is great for the kernel, but
+ * anyone (including userspace) can just use the raw "cpuid" instruction and
+ * the Host won't even notice since it isn't privileged. So we try not to get
+ * too worked up about it. */
 static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
 			 unsigned int *ecx, unsigned int *edx)
 {
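The TLS comment a few functions up says glibc builds __thread variables on those three per-task GDT entries. A quick standalone demonstration that each thread really does get its own copy (nothing lguest-specific; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static __thread int counter;   /* one instance per thread, via TLS */

    static void *bump(void *name)
    {
        counter++;                 /* touches only this thread's copy */
        printf("%s: counter=%d at %p\n",
               (char *)name, counter, (void *)&counter);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, bump, "thread A");
        pthread_create(&b, NULL, bump, "thread B");
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        bump("main");              /* main's copy was still 0 until now */
        return 0;
    }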
@@ -214,21 +382,43 @@ static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
 		*ecx &= 0x00002201;
 		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, FPU. */
 		*edx &= 0x07808101;
-		/* Host wants to know when we flush kernel pages: set PGE. */
+		/* The Host can do a nice optimization if it knows that the
+		 * kernel mappings (addresses above 0xC0000000 or whatever
+		 * PAGE_OFFSET is set to) haven't changed. But Linux calls
+		 * flush_tlb_user() for both user and kernel mappings unless
+		 * the Page Global Enable (PGE) feature bit is set. */
 		*edx |= 0x00002000;
 		break;
 	case 0x80000000:
 		/* Futureproof this a little: if they ask how much extended
-		 * processor information, limit it to known fields. */
+		 * processor information there is, limit it to known fields. */
 		if (*eax > 0x80000008)
 			*eax = 0x80000008;
 		break;
 	}
 }
 
+/* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
+ * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
+ * it. The Host needs to know when the Guest wants to change them, so we have
+ * a whole series of functions like read_cr0() and write_cr0().
+ *
+ * We start with CR0. CR0 allows you to turn on and off all kinds of basic
+ * features, but Linux only really cares about one: the horrifically-named
+ * Task Switched (TS) bit at bit 3 (ie. 8).
+ *
+ * What does the TS bit do? Well, it causes the CPU to trap (interrupt 7) if
+ * the floating point unit is used. That allows us to restore FPU state
+ * lazily after a task switch, and Linux uses it gratefully, but wouldn't a
+ * name like "FPUTRAP bit" be a little less cryptic?
+ *
+ * We store cr0 (and cr3) locally, because the Host never changes it. The
+ * Guest sometimes wants to read it and we'd prefer not to bother the Host
+ * unnecessarily. */
 static unsigned long current_cr0, current_cr3;
 static void lguest_write_cr0(unsigned long val)
 {
+	/* 8 == TS bit. */
 	lazy_hcall(LHCALL_TS, val & 8, 0, 0);
 	current_cr0 = val;
 }
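To see what lguest_cpuid()'s masking hides, you can compare raw cpuid output against the edx mask used above. A hedged userspace sketch using GCC's <cpuid.h> helper (x86 only; the printed "Guest view" is just this file's mask applied by hand):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
            fprintf(stderr, "cpuid level 1 unsupported\n");
            return 1;
        }
        /* 0x07808101 is the edx mask lguest applies (SSE, SSE2, FXSR,
         * MMX, CMOV, CMPXCHG8B, FPU), plus PGE (0x00002000) forced on. */
        printf("raw edx:              %#010x\n", edx);
        printf("as a Guest would see: %#010x\n",
               (edx & 0x07808101) | 0x00002000);
        return 0;
    }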
@@ -238,17 +428,25 @@ static unsigned long lguest_read_cr0(void)
 	return current_cr0;
 }
 
+/* Intel provided a special instruction to clear the TS bit for people too cool
+ * to use write_cr0() to do it. This "clts" instruction is faster, because all
+ * the vowels have been optimized out. */
 static void lguest_clts(void)
 {
 	lazy_hcall(LHCALL_TS, 0, 0, 0);
 	current_cr0 &= ~8U;
 }
 
+/* CR2 is the virtual address of the last page fault, which the Guest only ever
+ * reads. The Host kindly writes this into our "struct lguest_data", so we
+ * just read it out of there. */
 static unsigned long lguest_read_cr2(void)
 {
 	return lguest_data.cr2;
 }
 
+/* CR3 is the current toplevel pagetable page: the principle is the same as
+ * cr0. Keep a local copy, and tell the Host when it changes. */
 static void lguest_write_cr3(unsigned long cr3)
 {
 	lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
@@ -260,7 +458,7 @@ static unsigned long lguest_read_cr3(void)
 	return current_cr3;
 }
 
-/* Used to enable/disable PGE, but we don't care. */
+/* CR4 is used to enable and disable PGE, but we don't care. */
 static unsigned long lguest_read_cr4(void)
 {
 	return 0;
@@ -270,6 +468,59 @@
 {
 }
 
+/*
+ * Page Table Handling.
+ *
+ * Now would be a good time to take a rest and grab a coffee or similarly
+ * relaxing stimulant. The easy parts are behind us, and the trek gradually
+ * winds uphill from here.
+ *
+ * Quick refresher: memory is divided into "pages" of 4096 bytes each. The CPU
+ * maps virtual addresses to physical addresses using "page tables". We could
+ * use one huge index of 1 million entries: each address is 4 bytes, so that's
+ * 1024 pages just to hold the page tables. But since most virtual addresses
+ * are unused, we use a two level index which saves space. The CR3 register
+ * contains the physical address of the top level "page directory" page, which
+ * contains physical addresses of up to 1024 second-level pages. Each of these
+ * second level pages contains up to 1024 physical addresses of actual pages,
+ * or Page Table Entries (PTEs).
+ *
+ * Here's a diagram, where arrows indicate physical addresses:
+ *
+ * CR3 ---> +---------+
+ *          |  --------->+---------+
+ *          |         |  | PADDR1  |
+ *        Top-level   |  | PADDR2  |
+ *        (PMD) page  |  |         |
+ *          |         |  | Lower-level |
+ *          |         |  | (PTE) page  |
+ *          |         |  |         |
+ *            ....           ....
+ *
+ * So to convert a virtual address to a physical address, we look up the top
+ * level, which points us to the second level, which gives us the physical
+ * address of that page. If the top level entry was not present, or the second
+ * level entry was not present, then the virtual address is invalid (we say
+ * "the page was not mapped").
+ *
+ * Put another way, a 32-bit virtual address is divided up like so:
+ *
+ * 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
+ *    Index into top       Index into second    Offset within page
+ *  page directory page      pagetable page
+ *
+ * The kernel spends a lot of time changing both the top-level page directory
+ * and lower-level pagetable pages. The Guest doesn't know physical addresses,
+ * so while it maintains these page tables exactly like normal, it also needs
+ * to keep the Host informed whenever it makes a change: the Host will create
+ * the real page tables based on the Guests'.
+ */
+
+/* The Guest calls this to set a second-level entry (pte), ie. to map a page
+ * into a process' address space. We set the entry then tell the Host the
+ * toplevel and address this corresponds to. The Guest uses one pagetable per
+ * process, so we need to tell the Host which one we're changing (mm->pgd). */
 static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pteval)
 {
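The two-level walk described in the comment above is mechanical enough to sketch in portable C. This is an illustrative model only — flat arrays stand in for physical memory, there is a single second-level page, and all names are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PTRS_PER_PAGE 1024
    #define PRESENT       0x1u   /* low bit of an entry: "entry is valid" */

    /* Model: entry values are (physical page address | flag bits). */
    static uint32_t pmd_page[PTRS_PER_PAGE];   /* top level     */
    static uint32_t pte_page[PTRS_PER_PAGE];   /* second level  */

    static int translate(uint32_t vaddr, uint32_t *paddr)
    {
        uint32_t top = vaddr >> 22;                    /* top 10 bits    */
        uint32_t mid = (vaddr >> PAGE_SHIFT) & 0x3FF;  /* middle 10 bits */
        uint32_t off = vaddr & 0xFFF;                  /* low 12 bits    */

        if (!(pmd_page[top] & PRESENT))
            return -1;           /* "the page was not mapped" */
        /* A real walk would follow the physical address stored in the
         * entry; this model only has the one second-level page. */
        if (!(pte_page[mid] & PRESENT))
            return -1;
        *paddr = (pte_page[mid] & ~0xFFFu) | off;
        return 0;
    }

    int main(void)
    {
        uint32_t paddr, vaddr = (1u << 22) | (5u << PAGE_SHIFT) | 0x123;

        pmd_page[1] = PRESENT;               /* pretend: points at pte_page */
        pte_page[5] = 0x00ABC000 | PRESENT;  /* physical page for vaddr     */
        if (translate(vaddr, &paddr) == 0)
            printf("vaddr %#x -> paddr %#x\n", vaddr, paddr);
        return 0;
    }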
@@ -277,7 +528,9 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
 }
 
-/* We only support two-level pagetables at the moment. */
+/* The Guest calls this to set a top-level entry. Again, we set the entry then
+ * tell the Host which top-level page we changed, and the index of the entry we
+ * changed. */
 static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
 	*pmdp = pmdval;
@@ -285,7 +538,15 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 		   (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
 }
 
-/* FIXME: Eliminate all callers of this. */
+/* There are a couple of legacy places where the kernel sets a PTE, but we
+ * don't know the top level any more. This is useless for us, since we don't
+ * know which pagetable is changing or what address, so we just tell the Host
+ * to forget all of them. Fortunately, this is very rare.
+ *
+ * ... except in early boot when the kernel sets up the initial pagetables,
+ * which makes booting astonishingly slow. So we don't even tell the Host
+ * anything changed until we've done the first page table switch.
+ */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
 	*ptep = pteval;
@@ -294,22 +555,51 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
 }
 
+/* Unfortunately for Lguest, the paravirt_ops for page tables were based on
+ * native page table operations. On native hardware you can set a new page
+ * table entry whenever you want, but if you want to remove one you have to do
+ * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
+ *
+ * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
+ * called when a valid entry is written, not when it's removed (ie. marked not
+ * present). Instead, this is where we come when the Guest wants to remove a
+ * page table entry: we tell the Host to set that entry to 0 (ie. the present
+ * bit is zero). */
 static void lguest_flush_tlb_single(unsigned long addr)
 {
-	/* Simply set it to zero, and it will fault back in. */
+	/* Simply set it to zero: if it was not, it will fault back in. */
 	lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
 }
 
+/* This is what happens after the Guest has removed a large number of entries.
+ * This tells the Host that any of the page table entries for userspace might
+ * have changed, ie. virtual addresses below PAGE_OFFSET. */
 static void lguest_flush_tlb_user(void)
 {
 	lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
 }
 
+/* This is called when the kernel page tables have changed. That's not very
+ * common (unless the Guest is using highmem, which makes the Guest extremely
+ * slow), so it's worth separating this from the user flushing above. */
 static void lguest_flush_tlb_kernel(void)
 {
 	lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
 }
 
+/*
+ * The Unadvanced Programmable Interrupt Controller.
+ *
+ * This is an attempt to implement the simplest possible interrupt controller.
+ * I spent some time looking through routines like set_irq_chip_and_handler,
+ * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
+ * I *think* this is as simple as it gets.
+ *
+ * We tell the Host which interrupts we want blocked using the
+ * lguest_data.blocked_interrupts bitmap, so disabling (aka "masking") them is
+ * as simple as setting a bit. We don't actually "ack" interrupts as such, we
+ * just mask and unmask them. I wonder if we should be cleverer?
+ */
 static void disable_lguest_irq(unsigned int irq)
 {
 	set_bit(irq, lguest_data.blocked_interrupts);
@@ -318,9 +608,9 @@ static void disable_lguest_irq(unsigned int irq)
 static void enable_lguest_irq(unsigned int irq)
 {
 	clear_bit(irq, lguest_data.blocked_interrupts);
-	/* FIXME: If it's pending? */
 }
 
+/* This structure describes the lguest IRQ controller. */
 static struct irq_chip lguest_irq_controller = {
 	.name		= "lguest",
 	.mask		= disable_lguest_irq,
@@ -328,6 +618,10 @@ static struct irq_chip lguest_irq_controller = {
 	.unmask		= enable_lguest_irq,
 };
 
+/* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
+ * interrupt (except 128, which is used for system calls), and then tells the
+ * Linux infrastructure that each interrupt is controlled by our level-based
+ * lguest interrupt controller. */
 static void __init lguest_init_IRQ(void)
 {
 	unsigned int i;
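Masking via a shared bitmap, as the UPIC comment above describes, is a one-bit operation per interrupt. A small userspace model, with plain C bit operations standing in for the kernel's set_bit/clear_bit:

    #include <stdio.h>

    static unsigned long blocked_interrupts;   /* shared with the "Host" */

    static void mask_irq(unsigned int irq)
    {
        blocked_interrupts |= (1UL << irq);
    }

    static void unmask_irq(unsigned int irq)
    {
        blocked_interrupts &= ~(1UL << irq);
    }

    /* Host side: deliver only interrupts the Guest hasn't blocked. */
    static void host_deliver(unsigned int irq)
    {
        if (blocked_interrupts & (1UL << irq))
            printf("irq %u is masked: held back\n", irq);
        else
            printf("irq %u delivered\n", irq);
    }

    int main(void)
    {
        mask_irq(0);       /* timer blocked, as before lguest_time_init() */
        host_deliver(0);
        unmask_irq(0);
        host_deliver(0);
        return 0;
    }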
@@ -340,20 +634,51 @@ static void __init lguest_init_IRQ(void)
 						 handle_level_irq);
 		}
 	}
+	/* This call is required to set up for 4k stacks, where we have
+	 * separate stacks for hard and soft interrupts. */
 	irq_ctx_init(smp_processor_id());
 }
 
+/*
+ * Time.
+ *
+ * It would be far better for everyone if the Guest had its own clock, but
+ * until then the Host gives us the time on every interrupt.
+ */
 static unsigned long lguest_get_wallclock(void)
 {
-	return hcall(LHCALL_GET_WALLCLOCK, 0, 0, 0);
+	return lguest_data.time.tv_sec;
 }
 
 static cycle_t lguest_clock_read(void)
 {
+	unsigned long sec, nsec;
+
+	/* If the Host tells us the TSC speed, we can trust that. */
 	if (lguest_data.tsc_khz)
 		return native_read_tsc();
-	else
-		return jiffies;
+
+	/* If we can't use the TSC, we read the time value written by the Host.
+	 * Since it's in two parts (seconds and nanoseconds), we risk reading
+	 * it just as it's changing from 99 & 0.999999999 to 100 and 0, and
+	 * getting 99 and 0. As Linux tends to come apart under the stress of
+	 * time travel, we must be careful: */
+	do {
+		/* First we read the seconds part. */
+		sec = lguest_data.time.tv_sec;
+		/* This read memory barrier tells the compiler and the CPU that
+		 * this can't be reordered: we have to complete the above
+		 * before going on. */
+		rmb();
+		/* Now we read the nanoseconds part. */
+		nsec = lguest_data.time.tv_nsec;
+		/* Make sure we've done that. */
+		rmb();
+		/* Now if the seconds part has changed, try again. */
+	} while (unlikely(lguest_data.time.tv_sec != sec));
+
+	/* Our non-TSC clock is in real nanoseconds. */
+	return sec*1000000000ULL + nsec;
 }
 
 /* This is what we tell the kernel is our clocksource. */
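Why the retry loop matters: the Host updates both halves of the time between Guest instructions, so a read can only be torn if an update lands between our two reads — and re-checking tv_sec catches that. A hedged, deterministic userspace sketch of the worst case (the simulated interrupt and all names are invented; a real rmb() would sit where the comment says):

    #include <stdio.h>

    /* The "Host" updates both fields together, between Guest instructions. */
    static volatile long t_sec = 99, t_nsec = 999999999;

    static int reads;

    /* Simulate an interrupt arriving right after our first read of tv_sec. */
    static void maybe_interrupt(void)
    {
        if (++reads == 1) {        /* fire exactly once, at the worst moment */
            t_sec = 100;
            t_nsec = 0;
        }
    }

    static unsigned long long clock_read(void)
    {
        long sec, nsec;

        do {
            sec = t_sec;           /* first read the seconds part   */
            maybe_interrupt();     /* (a real rmb() would sit here)  */
            nsec = t_nsec;         /* then the nanoseconds part      */
        } while (t_sec != sec);    /* seconds changed? try again     */

        return sec * 1000000000ULL + nsec;
    }

    int main(void)
    {
        /* Without the retry we'd return 99s + 0ns and time would appear to
         * go backwards; with it we correctly return 100.000000000. */
        printf("%llu\n", clock_read());
        return 0;
    }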
@@ -361,8 +686,11 @@ static struct clocksource lguest_clock = {
 	.name		= "lguest",
 	.rating		= 400,
 	.read		= lguest_clock_read,
+	.mask		= CLOCKSOURCE_MASK(64),
+	.mult		= 1,
 };
 
+/* The "scheduler clock" is just our real clock, adjusted to start at zero */
 static unsigned long long lguest_sched_clock(void)
 {
 	return cyc2ns(&lguest_clock, lguest_clock_read() - clock_base);
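cyc2ns() converts clocksource cycles to nanoseconds as (cycles * mult) >> shift, which is why the non-TSC clock above can get away with mult = 1 (it already returns nanoseconds). A quick sketch of the arithmetic, including how a khz2mult-style mult is derived — the formula follows from the definitions, but the function names here are invented:

    #include <stdint.h>
    #include <stdio.h>

    /* ns = cycles * mult >> shift. For a counter ticking at `khz` kilohertz,
     * one cycle is 10^6/khz ns, so mult = (10^6 << shift) / khz. */
    static uint32_t khz2mult(uint32_t khz, uint32_t shift)
    {
        return (uint32_t)((1000000ULL << shift) / khz);
    }

    static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
    {
        return (cycles * mult) >> shift;
    }

    int main(void)
    {
        uint32_t shift = 22;
        uint32_t mult = khz2mult(2000000, shift);   /* a 2 GHz TSC */

        /* 2e9 cycles at 2 GHz should be ~1 second (1e9 ns): */
        printf("mult=%u -> %llu ns\n", mult,
               (unsigned long long)cyc2ns(2000000000ULL, mult, shift));

        /* The nanosecond clock: mult=1, shift=0 is the identity. */
        printf("%llu ns\n", (unsigned long long)cyc2ns(123456789ULL, 1, 0));
        return 0;
    }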
@@ -428,34 +756,55 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
 	local_irq_restore(flags);
 }
 
+/* At some point in the boot process, we get asked to set up our timing
+ * infrastructure. The kernel doesn't expect timer interrupts before this, but
+ * we cleverly initialized the "blocked_interrupts" field of "struct
+ * lguest_data" so that timer interrupts were blocked until now. */
 static void lguest_time_init(void)
 {
+	/* Set up the timer interrupt (0) to go to our simple timer routine */
 	set_irq_handler(0, lguest_time_irq);
 
-	/* We use the TSC if the Host tells us we can, otherwise a dumb
-	 * jiffies-based clock. */
+	/* Our clock structure looks like arch/i386/kernel/tsc.c if we can use
+	 * the TSC, otherwise it's a dumb nanosecond-resolution clock. Either
+	 * way, the "rating" is initialized so high that it's always chosen
+	 * over any other clocksource. */
 	if (lguest_data.tsc_khz) {
 		lguest_clock.shift = 22;
 		lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz,
 							 lguest_clock.shift);
-		lguest_clock.mask = CLOCKSOURCE_MASK(64);
 		lguest_clock.flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	} else {
-		/* To understand this, start at kernel/time/jiffies.c... */
-		lguest_clock.shift = 8;
-		lguest_clock.mult = (((u64)NSEC_PER_SEC<<8)/ACTHZ) << 8;
-		lguest_clock.mask = CLOCKSOURCE_MASK(32);
 	}
 	clock_base = lguest_clock_read();
 	clocksource_register(&lguest_clock);
 
-	/* We can't set cpumask in the initializer: damn C limitations! */
+	/* Now we've set up our clock, we can use it as the scheduler clock */
+	paravirt_ops.sched_clock = lguest_sched_clock;
+
+	/* We can't set cpumask in the initializer: damn C limitations! Set it
+	 * here and register our timer device. */
 	lguest_clockevent.cpumask = cpumask_of_cpu(0);
 	clockevents_register_device(&lguest_clockevent);
 
+	/* Finally, we unblock the timer interrupt. */
 	enable_lguest_irq(0);
 }
 
+/*
+ * Miscellaneous bits and pieces.
+ *
+ * Here is an oddball collection of functions which the Guest needs for things
+ * to work. They're pretty simple.
+ */
+
+/* The Guest needs to tell the Host what stack it expects traps to use. For
+ * native hardware, this is part of the Task State Segment mentioned above in
+ * lguest_load_tr_desc(), but to help hypervisors there's this special call.
+ *
+ * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
+ * segment), the privilege level (we're privilege level 1, the Host is 0 and
+ * will not tolerate us trying to use that), the stack pointer, and the number
+ * of pages in the stack. */
 static void lguest_load_esp0(struct tss_struct *tss,
 			     struct thread_struct *thread)
 {
@@ -463,15 +812,31 @@ static void lguest_load_esp0(struct tss_struct *tss,
 			   THREAD_SIZE/PAGE_SIZE);
 }
 
+/* Let's just say, I wouldn't do debugging under a Guest. */
 static void lguest_set_debugreg(int regno, unsigned long value)
 {
 	/* FIXME: Implement */
 }
 
+/* There are times when the kernel wants to make sure that no memory writes are
+ * caught in the cache (that they've all reached real hardware devices). This
+ * doesn't matter for the Guest which has virtual hardware.
+ *
+ * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
+ * (clflush) instruction is available and the kernel uses that. Otherwise, it
+ * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
+ * Unlike clflush, wbinvd can only be run at privilege level 0. So we can
+ * ignore clflush, but replace wbinvd.
+ */
 static void lguest_wbinvd(void)
 {
 }
 
+/* If the Guest expects to have an Advanced Programmable Interrupt Controller,
+ * we play dumb by ignoring writes and returning 0 for reads. So it's no
+ * longer Programmable nor Controlling anything, and I don't think 8 lines of
+ * code qualifies for Advanced. It will also never interrupt anything. It
+ * does, however, allow us to get through the Linux boot code. */
 #ifdef CONFIG_X86_LOCAL_APIC
 static void lguest_apic_write(unsigned long reg, unsigned long v)
 {
@@ -483,19 +848,32 @@ static unsigned long lguest_apic_read(unsigned long reg)
 }
 #endif
 
+/* STOP! Until an interrupt comes in. */
 static void lguest_safe_halt(void)
 {
 	hcall(LHCALL_HALT, 0, 0, 0);
 }
 
+/* Perhaps CRASH isn't the best name for this hypercall, but we use it both to
+ * get a message out when we're crashing and for elegant termination like
+ * powering off.
+ *
+ * Note that the Host always prefers that the Guest speak in physical addresses
+ * rather than virtual addresses, so we use __pa() here. */
 static void lguest_power_off(void)
 {
 	hcall(LHCALL_CRASH, __pa("Power down"), 0, 0);
 }
 
+/*
+ * Panicking.
+ *
+ * Don't. But if you did, this is what happens.
+ */
 static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
 {
 	hcall(LHCALL_CRASH, __pa(p), 0, 0);
+	/* The hcall won't return, but to keep gcc happy, we're "done". */
 	return NOTIFY_DONE;
 }
 
@@ -503,15 +881,45 @@ static struct notifier_block paniced = {
 	.notifier_call = lguest_panic
 };
 
+/* Setting up memory is fairly easy. */
 static __init char *lguest_memory_setup(void)
 {
-	/* We do this here because lockcheck barfs if before start_kernel */
+	/* We do this here and not earlier because lockcheck barfs if we do it
+	 * before start_kernel() */
 	atomic_notifier_chain_register(&panic_notifier_list, &paniced);
 
+	/* The Linux bootloader header contains an "e820" memory map: the
+	 * Launcher populated the first entry with our memory limit. */
 	add_memory_region(E820_MAP->addr, E820_MAP->size, E820_MAP->type);
+
+	/* This string is for the boot messages. */
 	return "LGUEST";
 }
 
+/*G:050
+ * Patching (Powerfully Placating Performance Pedants)
+ *
+ * We have already seen that "struct paravirt_ops" lets us replace simple
+ * native instructions with calls to the appropriate back end all throughout
+ * the kernel. This allows the same kernel to run as a Guest and as a native
+ * kernel, but it's slow because of all the indirect branches.
+ *
+ * Remember that David Wheeler quote about "Any problem in computer science can
+ * be solved with another layer of indirection"? The rest of that quote is
+ * "... But that usually will create another problem." This is the first of
+ * those problems.
+ *
+ * Our current solution is to allow the paravirt back end to optionally patch
+ * over the indirect calls to replace them with something more efficient. We
+ * patch the four most commonly called functions: disable interrupts, enable
+ * interrupts, restore interrupts and save interrupts. We usually have 10
+ * bytes to patch into: the Guest versions of these operations are small enough
+ * that we can fit comfortably.
+ *
+ * First we need assembly templates of each of the patchable Guest operations,
+ * and these are in lguest_asm.S. */
+
+/*G:060 We construct a table from the assembler templates: */
 static const struct lguest_insns
 {
 	const char *start, *end;
@@ -521,35 +929,52 @@ static const struct lguest_insns
 	[PARAVIRT_PATCH(restore_fl)] = { lgstart_popf, lgend_popf },
 	[PARAVIRT_PATCH(save_fl)] = { lgstart_pushf, lgend_pushf },
 };
+
+/* Now our patch routine is fairly simple (based on the native one in
+ * paravirt.c). If we have a replacement, we copy it in and return how much of
+ * the available space we used. */
 static unsigned lguest_patch(u8 type, u16 clobber, void *insns, unsigned len)
 {
 	unsigned int insn_len;
 
-	/* Don't touch it if we don't have a replacement */
+	/* Don't do anything special if we don't have a replacement */
 	if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
 		return paravirt_patch_default(type, clobber, insns, len);
 
 	insn_len = lguest_insns[type].end - lguest_insns[type].start;
 
-	/* Similarly if we can't fit replacement. */
+	/* Similarly if we can't fit the replacement (shouldn't happen, but
+	 * let's be thorough). */
 	if (len < insn_len)
 		return paravirt_patch_default(type, clobber, insns, len);
 
+	/* Copy in our instructions. */
 	memcpy(insns, lguest_insns[type].start, insn_len);
 	return insn_len;
 }
 
+/*G:030 Once we get to lguest_init(), we know we're a Guest. The paravirt_ops
+ * structure in the kernel provides a single point for (almost) every routine
+ * we have to override to avoid privileged instructions. */
 __init void lguest_init(void *boot)
 {
-	/* Copy boot parameters first. */
+	/* Copy boot parameters first: the Launcher put the physical location
+	 * in %esi, and head.S converted that to a virtual address and handed
+	 * it to us. */
 	memcpy(&boot_params, boot, PARAM_SIZE);
+	/* The boot parameters also tell us where the command-line is: save
+	 * that, too. */
 	memcpy(boot_command_line, __va(boot_params.hdr.cmd_line_ptr),
 	       COMMAND_LINE_SIZE);
 
+	/* We're under lguest, paravirt is enabled, and we're running at
+	 * privilege level 1, not 0 as normal. */
 	paravirt_ops.name = "lguest";
 	paravirt_ops.paravirt_enabled = 1;
 	paravirt_ops.kernel_rpl = 1;
 
+	/* We set up all the lguest overrides for sensitive operations. These
+	 * are detailed with the operations themselves. */
 	paravirt_ops.save_fl = save_fl;
 	paravirt_ops.restore_fl = restore_fl;
 	paravirt_ops.irq_disable = irq_disable;
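The template-copy mechanism in lguest_patch() is easy to demonstrate outside the kernel. A hedged sketch where byte arrays stand in for the lgstart_*/lgend_* assembler labels — everything below is invented for illustration, not the real templates:

    #include <stdio.h>
    #include <string.h>

    struct insn_template {
        const unsigned char *start;
        unsigned int len;
    };

    /* Pretend these bytes came from an assembler template. */
    static const unsigned char tmpl_irq_disable[] = { 0x90, 0x90 };

    static const struct insn_template templates[] = {
        [0] = { tmpl_irq_disable, sizeof(tmpl_irq_disable) },
    };

    /* Like lguest_patch(): copy the template if it exists and fits. */
    static unsigned int patch(unsigned int type, unsigned char *site,
                              unsigned int room)
    {
        if (type >= sizeof(templates)/sizeof(templates[0]) ||
            !templates[type].start || room < templates[type].len)
            return 0;   /* caller falls back to the indirect call */
        memcpy(site, templates[type].start, templates[type].len);
        return templates[type].len;
    }

    int main(void)
    {
        unsigned char site[10] = { 0 };   /* 10 bytes of patchable space */
        unsigned int used = patch(0, site, sizeof(site));

        printf("patched %u of %zu bytes\n", used, sizeof(site));
        return 0;
    }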
@@ -592,21 +1017,50 @@ __init void lguest_init(void *boot)
 	paravirt_ops.time_init = lguest_time_init;
 	paravirt_ops.set_lazy_mode = lguest_lazy_mode;
 	paravirt_ops.wbinvd = lguest_wbinvd;
-	paravirt_ops.sched_clock = lguest_sched_clock;
-
+	/* Now is a good time to look at the implementations of these functions
+	 * before returning to the rest of lguest_init(). */
+
+	/*G:070 Now we've seen all the paravirt_ops, we return to
+	 * lguest_init() where the rest of the fairly chaotic boot setup
+	 * occurs.
+	 *
+	 * The Host expects our first hypercall to tell it where our "struct
+	 * lguest_data" is, so we do that first. */
 	hcall(LHCALL_LGUEST_INIT, __pa(&lguest_data), 0, 0);
 
-	/* We use top of mem for initial pagetables. */
+	/* The native boot code sets up initial page tables immediately after
+	 * the kernel itself, and sets init_pg_tables_end so they're not
+	 * clobbered. The Launcher places our initial pagetables somewhere at
+	 * the top of our physical memory, so we don't need extra space: set
+	 * init_pg_tables_end to the end of the kernel. */
 	init_pg_tables_end = __pa(pg0);
 
+	/* Load the %fs segment register (the per-cpu segment register) with
+	 * the normal data segment to get through booting. */
 	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
 
+	/* Clear the part of the kernel data which is expected to be zero.
+	 * Normally it will be anyway, but if we're loading from a bzImage with
+	 * CONFIG_RELOCATABLE=y, the relocations will be sitting here. */
+	memset(__bss_start, 0, __bss_stop - __bss_start);
+
+	/* The Host uses the top of the Guest's virtual address space for the
+	 * Host<->Guest Switcher, and it tells us how much it needs in
+	 * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */
 	reserve_top_address(lguest_data.reserve_mem);
 
+	/* If we don't initialize the lock dependency checker now, it crashes
+	 * paravirt_disable_iospace. */
 	lockdep_init();
 
+	/* The IDE code spends about 3 seconds probing for disks: if we reserve
+	 * all the I/O ports up front it can't get them and so doesn't probe.
+	 * Other device drivers are similar (but less severe). This cuts the
+	 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */
 	paravirt_disable_iospace();
 
+	/* This is messy CPU setup stuff which the native boot code does before
+	 * start_kernel, so we have to do it, too: */
 	cpu_detect(&new_cpu_data);
 	/* head.S usually sets up the first capability word, so do it here. */
 	new_cpu_data.x86_capability[0] = cpuid_edx(1);
@@ -617,14 +1071,27 @@ __init void lguest_init(void *boot)
 #ifdef CONFIG_X86_MCE
 	mce_disabled = 1;
 #endif
-
 #ifdef CONFIG_ACPI
 	acpi_disabled = 1;
 	acpi_ht = 0;
 #endif
 
+	/* We set the preferred console to "hvc". This is the "hypervisor
+	 * virtual console" driver written by the PowerPC people, which we also
+	 * adapted for lguest's use. */
 	add_preferred_console("hvc", 0, NULL);
 
+	/* Last of all, we set the power management poweroff hook to point to
+	 * the Guest routine to power off. */
 	pm_power_off = lguest_power_off;
+
+	/* Now we're set up, call start_kernel() in init/main.c and we proceed
+	 * to boot as normal. It never returns. */
 	start_kernel();
 }
+/*
+ * This marks the end of stage II of our journey, The Guest.
+ *
+ * It is now time for us to explore the nooks and crannies of the three Guest
+ * devices and complete our understanding of the Guest in "make Drivers".
+ */