| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-07-30 19:45:03 -0400 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-07-30 19:45:03 -0400 |
| commit    | 6ae7d6f0195a0ec7e5d07821e62c79898cd33fdc (patch) | |
| tree      | ed7975b5ae042e16500c1f5cb8b5756a6bf8d643 | |
| parent    | ec30c5f3a18722f8fcf8c83146a10b03ac4d9ff1 (diff) | |
| parent    | 1842f23c05b6a866be831aa60bc8a8731c58ddd0 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
lguest and virtio: cleanup struct definitions to Linux style.
lguest: update commentry
lguest: fix comment style
virtio: refactor find_vqs
virtio: delete vq from list
virtio: fix memory leak on device removal
lguest: fix descriptor corruption in example launcher
lguest: dereferencing freed mem in add_eventfd()
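Two of the commits above account for most of the mechanical churn in the diff that follows: the struct-definition cleanup and the comment-style fixes. As a minimal sketch of the first (using `struct device_list`, one of the launcher structs actually touched below):

```c
/* Old launcher style: the opening brace sits on its own line. */
struct device_list
{
        /* Counter to assign interrupt numbers. */
        unsigned int next_irq;
};

/* Linux kernel style: the opening brace ends the struct line. */
struct device_list {
        /* Counter to assign interrupt numbers. */
        unsigned int next_irq;
};
```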
| -rw-r--r-- | Documentation/lguest/lguest.c          | 721 |
| -rw-r--r-- | arch/x86/include/asm/lguest.h          |   3 |
| -rw-r--r-- | arch/x86/include/asm/lguest_hcall.h    |  18 |
| -rw-r--r-- | arch/x86/lguest/boot.c                 | 509 |
| -rw-r--r-- | arch/x86/lguest/i386_head.S            | 112 |
| -rw-r--r-- | drivers/lguest/core.c                  | 119 |
| -rw-r--r-- | drivers/lguest/hypercalls.c            | 145 |
| -rw-r--r-- | drivers/lguest/interrupts_and_traps.c  | 288 |
| -rw-r--r-- | drivers/lguest/lg.h                    |  32 |
| -rw-r--r-- | drivers/lguest/lguest_device.c         | 160 |
| -rw-r--r-- | drivers/lguest/lguest_user.c           | 232 |
| -rw-r--r-- | drivers/lguest/page_tables.c           | 489 |
| -rw-r--r-- | drivers/lguest/segments.c              | 106 |
| -rw-r--r-- | drivers/lguest/x86/core.c              | 374 |
| -rw-r--r-- | drivers/lguest/x86/switcher_32.S       |  22 |
| -rw-r--r-- | drivers/virtio/virtio_pci.c            | 240 |
| -rw-r--r-- | include/linux/lguest.h                 |  39 |
| -rw-r--r-- | include/linux/lguest_launcher.h        |  18 |
| -rw-r--r-- | include/linux/virtio_blk.h             |   6 |
| -rw-r--r-- | include/linux/virtio_config.h          |   3 |
| -rw-r--r-- | include/linux/virtio_net.h             |   6 |
| -rw-r--r-- | include/linux/virtio_ring.h            |  12 |

22 files changed, 2422 insertions(+), 1232 deletions(-)
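The other bulk change visible below is block-comment layout; the hunks repeatedly rewrite comments into the kernel's multi-line form. Schematically (illustrative only):

```c
/* Old form: text starts on the opening line, and the
 * terminator shares the last line of text. */

/*
 * Kernel form: the opening line is bare, every continuation line
 * carries a leading " * ", and the terminator gets its own line.
 */
```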
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 9ebcd6ef361b..950cde6d6e58 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -1,7 +1,9 @@
-/*P:100 This is the Launcher code, a simple program which lays out the
- * "physical" memory for the new Guest by mapping the kernel image and
- * the virtual devices, then opens /dev/lguest to tell the kernel
- * about the Guest and control it. :*/
+/*P:100
+ * This is the Launcher code, a simple program which lays out the "physical"
+ * memory for the new Guest by mapping the kernel image and the virtual
+ * devices, then opens /dev/lguest to tell the kernel about the Guest and
+ * control it.
+:*/
 #define _LARGEFILE64_SOURCE
 #define _GNU_SOURCE
 #include <stdio.h>
@@ -46,13 +48,15 @@
 #include "linux/virtio_rng.h"
 #include "linux/virtio_ring.h"
 #include "asm/bootparam.h"
-/*L:110 We can ignore the 39 include files we need for this program, but I do
- * want to draw attention to the use of kernel-style types.
+/*L:110
+ * We can ignore the 42 include files we need for this program, but I do want
+ * to draw attention to the use of kernel-style types.
  *
  * As Linus said, "C is a Spartan language, and so should your naming be." I
  * like these abbreviations, so we define them here. Note that u64 is always
  * unsigned long long, which works on all Linux systems: this means that we can
- * use %llu in printf for any u64. */
+ * use %llu in printf for any u64.
+ */
 typedef unsigned long long u64;
 typedef uint32_t u32;
 typedef uint16_t u16;
@@ -69,8 +73,10 @@ typedef uint8_t u8;
 /* This will occupy 3 pages: it must be a power of 2. */
 #define VIRTQUEUE_NUM 256
 
-/*L:120 verbose is both a global flag and a macro. The C preprocessor allows
- * this, and although I wouldn't recommend it, it works quite nicely here. */
+/*L:120
+ * verbose is both a global flag and a macro. The C preprocessor allows
+ * this, and although I wouldn't recommend it, it works quite nicely here.
+ */
 static bool verbose;
 #define verbose(args...) \
         do { if (verbose) printf(args); } while(0)
@@ -87,8 +93,7 @@ static int lguest_fd;
 static unsigned int __thread cpu_id;
 
 /* This is our list of devices. */
-struct device_list
-{
+struct device_list {
         /* Counter to assign interrupt numbers. */
         unsigned int next_irq;
 
@@ -100,8 +105,7 @@ struct device_list
 
         /* A single linked list of devices. */
         struct device *dev;
-        /* And a pointer to the last device for easy append and also for
-         * configuration appending. */
+        /* And a pointer to the last device for easy append. */
         struct device *lastdev;
 };
 
@@ -109,8 +113,7 @@ struct device_list
 static struct device_list devices;
 
 /* The device structure describes a single device. */
-struct device
-{
+struct device {
         /* The linked-list pointer. */
         struct device *next;
 
@@ -135,8 +138,7 @@ struct device
 };
 
 /* The virtqueue structure describes a queue attached to a device. */
-struct virtqueue
-{
+struct virtqueue {
         struct virtqueue *next;
 
         /* Which device owns me. */
@@ -168,20 +170,24 @@ static char **main_args;
 /* The original tty settings to restore on exit. */
 static struct termios orig_term;
 
-/* We have to be careful with barriers: our devices are all run in separate
+/*
+ * We have to be careful with barriers: our devices are all run in separate
  * threads and so we need to make sure that changes visible to the Guest happen
- * in precise order. */
+ * in precise order.
+ */
 #define wmb() __asm__ __volatile__("" : : : "memory")
 #define mb() __asm__ __volatile__("" : : : "memory")
 
-/* Convert an iovec element to the given type.
+/*
+ * Convert an iovec element to the given type.
  *
  * This is a fairly ugly trick: we need to know the size of the type and
  * alignment requirement to check the pointer is kosher. It's also nice to
  * have the name of the type in case we report failure.
  *
  * Typing those three things all the time is cumbersome and error prone, so we
- * have a macro which sets them all up and passes to the real function. */
+ * have a macro which sets them all up and passes to the real function.
+ */
 #define convert(iov, type) \
         ((type *)_convert((iov), sizeof(type), __alignof__(type), #type))
 
@@ -198,8 +204,10 @@ static void *_convert(struct iovec *iov, size_t size, size_t align,
 /* Wrapper for the last available index. Makes it easier to change. */
 #define lg_last_avail(vq) ((vq)->last_avail_idx)
 
-/* The virtio configuration space is defined to be little-endian. x86 is
- * little-endian too, but it's nice to be explicit so we have these helpers. */
+/*
+ * The virtio configuration space is defined to be little-endian. x86 is
+ * little-endian too, but it's nice to be explicit so we have these helpers.
+ */
 #define cpu_to_le16(v16) (v16)
 #define cpu_to_le32(v32) (v32)
 #define cpu_to_le64(v64) (v64)
@@ -241,11 +249,12 @@ static u8 *get_feature_bits(struct device *dev)
                 + dev->num_vq * sizeof(struct lguest_vqconfig);
 }
 
-/*L:100 The Launcher code itself takes us out into userspace, that scary place
- * where pointers run wild and free! Unfortunately, like most userspace
- * programs, it's quite boring (which is why everyone likes to hack on the
- * kernel!). Perhaps if you make up an Lguest Drinking Game at this point, it
- * will get you through this section. Or, maybe not.
+/*L:100
+ * The Launcher code itself takes us out into userspace, that scary place where
+ * pointers run wild and free! Unfortunately, like most userspace programs,
+ * it's quite boring (which is why everyone likes to hack on the kernel!).
+ * Perhaps if you make up an Lguest Drinking Game at this point, it will get
+ * you through this section. Or, maybe not.
  *
  * The Launcher sets up a big chunk of memory to be the Guest's "physical"
  * memory and stores it in "guest_base". In other words, Guest physical ==
@@ -253,7 +262,8 @@ static u8 *get_feature_bits(struct device *dev)
  *
  * This can be tough to get your head around, but usually it just means that we
  * use these trivial conversion functions when the Guest gives us it's
- * "physical" addresses: */
+ * "physical" addresses:
+ */
 static void *from_guest_phys(unsigned long addr)
 {
         return guest_base + addr;
@@ -268,7 +278,8 @@ static unsigned long to_guest_phys(const void *addr)
  * Loading the Kernel.
  *
  * We start with couple of simple helper routines. open_or_die() avoids
- * error-checking code cluttering the callers: */
+ * error-checking code cluttering the callers:
+ */
 static int open_or_die(const char *name, int flags)
 {
         int fd = open(name, flags);
@@ -283,12 +294,19 @@ static void *map_zeroed_pages(unsigned int num)
         int fd = open_or_die("/dev/zero", O_RDONLY);
         void *addr;
 
-        /* We use a private mapping (ie. if we write to the page, it will be
-         * copied). */
+        /*
+         * We use a private mapping (ie. if we write to the page, it will be
+         * copied).
+         */
         addr = mmap(NULL, getpagesize() * num,
                     PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0);
         if (addr == MAP_FAILED)
                 err(1, "Mmaping %u pages of /dev/zero", num);
+
+        /*
+         * One neat mmap feature is that you can close the fd, and it
+         * stays mapped.
+         */
         close(fd);
 
         return addr;
@@ -305,20 +323,24 @@ static void *get_pages(unsigned int num)
         return addr;
 }
 
-/* This routine is used to load the kernel or initrd. It tries mmap, but if
+/*
+ * This routine is used to load the kernel or initrd. It tries mmap, but if
  * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries),
- * it falls back to reading the memory in. */
+ * it falls back to reading the memory in.
+ */
 static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
 {
         ssize_t r;
 
-        /* We map writable even though for some segments are marked read-only.
+        /*
+         * We map writable even though for some segments are marked read-only.
          * The kernel really wants to be writable: it patches its own
          * instructions.
          *
          * MAP_PRIVATE means that the page won't be copied until a write is
         * done to it. This allows us to share untouched memory between
-         * Guests. */
+         * Guests.
+         */
         if (mmap(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC,
                  MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
                 return;
@@ -329,7 +351,8 @@ static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
                 err(1, "Reading offset %lu len %lu gave %zi", offset, len, r);
 }
 
-/* This routine takes an open vmlinux image, which is in ELF, and maps it into
+/*
+ * This routine takes an open vmlinux image, which is in ELF, and maps it into
  * the Guest memory. ELF = Embedded Linking Format, which is the format used
  * by all modern binaries on Linux including the kernel.
  *
@@ -337,23 +360,28 @@ static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
  * address. We use the physical address; the Guest will map itself to the
  * virtual address.
  *
- * We return the starting address. */
+ * We return the starting address.
+ */
 static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
 {
         Elf32_Phdr phdr[ehdr->e_phnum];
         unsigned int i;
 
-        /* Sanity checks on the main ELF header: an x86 executable with a
-         * reasonable number of correctly-sized program headers. */
+        /*
+         * Sanity checks on the main ELF header: an x86 executable with a
+         * reasonable number of correctly-sized program headers.
+         */
         if (ehdr->e_type != ET_EXEC
             || ehdr->e_machine != EM_386
             || ehdr->e_phentsize != sizeof(Elf32_Phdr)
             || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
                 errx(1, "Malformed elf header");
 
-        /* An ELF executable contains an ELF header and a number of "program"
+        /*
+         * An ELF executable contains an ELF header and a number of "program"
          * headers which indicate which parts ("segments") of the program to
-         * load where. */
+         * load where.
+         */
 
         /* We read in all the program headers at once: */
         if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
@@ -361,8 +389,10 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
         if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
                 err(1, "Reading program headers");
 
-        /* Try all the headers: there are usually only three. A read-only one,
-         * a read-write one, and a "note" section which we don't load. */
+        /*
+         * Try all the headers: there are usually only three. A read-only one,
+         * a read-write one, and a "note" section which we don't load.
+         */
         for (i = 0; i < ehdr->e_phnum; i++) {
                 /* If this isn't a loadable segment, we ignore it */
                 if (phdr[i].p_type != PT_LOAD)
@@ -380,13 +410,15 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
         return ehdr->e_entry;
 }
 
-/*L:150 A bzImage, unlike an ELF file, is not meant to be loaded. You're
- * supposed to jump into it and it will unpack itself. We used to have to
- * perform some hairy magic because the unpacking code scared me.
+/*L:150
+ * A bzImage, unlike an ELF file, is not meant to be loaded. You're supposed
+ * to jump into it and it will unpack itself. We used to have to perform some
+ * hairy magic because the unpacking code scared me.
  *
  * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote
  * a small patch to jump over the tricky bits in the Guest, so now we just read
- * the funky header so we know where in the file to load, and away we go! */
+ * the funky header so we know where in the file to load, and away we go!
+ */
 static unsigned long load_bzimage(int fd)
 {
         struct boot_params boot;
@@ -394,8 +426,10 @@ static unsigned long load_bzimage(int fd)
         /* Modern bzImages get loaded at 1M. */
         void *p = from_guest_phys(0x100000);
 
-        /* Go back to the start of the file and read the header. It should be
-         * a Linux boot header (see Documentation/x86/i386/boot.txt) */
+        /*
+         * Go back to the start of the file and read the header. It should be
+         * a Linux boot header (see Documentation/x86/i386/boot.txt)
+         */
         lseek(fd, 0, SEEK_SET);
         read(fd, &boot, sizeof(boot));
 
@@ -414,9 +448,11 @@ static unsigned long load_bzimage(int fd)
         return boot.hdr.code32_start;
 }
 
-/*L:140 Loading the kernel is easy when it's a "vmlinux", but most kernels
+/*L:140
+ * Loading the kernel is easy when it's a "vmlinux", but most kernels
  * come wrapped up in the self-decompressing "bzImage" format. With a little
- * work, we can load those, too. */
+ * work, we can load those, too.
+ */
 static unsigned long load_kernel(int fd)
 {
         Elf32_Ehdr hdr;
@@ -433,24 +469,28 @@ static unsigned long load_kernel(int fd)
         return load_bzimage(fd);
 }
 
-/* This is a trivial little helper to align pages. Andi Kleen hated it because
+/*
+ * This is a trivial little helper to align pages. Andi Kleen hated it because
  * it calls getpagesize() twice: "it's dumb code."
  *
  * Kernel guys get really het up about optimization, even when it's not
- * necessary. I leave this code as a reaction against that. */
+ * necessary. I leave this code as a reaction against that.
+ */
 static inline unsigned long page_align(unsigned long addr)
 {
         /* Add upwards and truncate downwards. */
         return ((addr + getpagesize()-1) & ~(getpagesize()-1));
 }
 
-/*L:180 An "initial ram disk" is a disk image loaded into memory along with
- * the kernel which the kernel can use to boot from without needing any
- * drivers. Most distributions now use this as standard: the initrd contains
- * the code to load the appropriate driver modules for the current machine.
+/*L:180
+ * An "initial ram disk" is a disk image loaded into memory along with the
+ * kernel which the kernel can use to boot from without needing any drivers.
+ * Most distributions now use this as standard: the initrd contains the code to
+ * load the appropriate driver modules for the current machine.
  *
  * Importantly, James Morris works for RedHat, and Fedora uses initrds for its
- * kernels. He sent me this (and tells me when I break it). */
+ * kernels. He sent me this (and tells me when I break it).
+ */
 static unsigned long load_initrd(const char *name, unsigned long mem)
 {
         int ifd;
@@ -462,12 +502,16 @@ static unsigned long load_initrd(const char *name, unsigned long mem)
         if (fstat(ifd, &st) < 0)
                 err(1, "fstat() on initrd '%s'", name);
 
-        /* We map the initrd at the top of memory, but mmap wants it to be
-         * page-aligned, so we round the size up for that. */
+        /*
+         * We map the initrd at the top of memory, but mmap wants it to be
+         * page-aligned, so we round the size up for that.
+         */
         len = page_align(st.st_size);
         map_at(ifd, from_guest_phys(mem - len), 0, st.st_size);
-        /* Once a file is mapped, you can close the file descriptor. It's a
-         * little odd, but quite useful. */
+        /*
+         * Once a file is mapped, you can close the file descriptor. It's a
+         * little odd, but quite useful.
+         */
         close(ifd);
         verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len);
 
@@ -476,8 +520,10 @@ static unsigned long load_initrd(const char *name, unsigned long mem)
 }
 /*:*/
 
-/* Simple routine to roll all the commandline arguments together with spaces
- * between them. */
+/*
+ * Simple routine to roll all the commandline arguments together with spaces
+ * between them.
+ */
 static void concat(char *dst, char *args[])
 {
         unsigned int i, len = 0;
@@ -494,10 +540,12 @@ static void concat(char *dst, char *args[])
         dst[len] = '\0';
 }
 
-/*L:185 This is where we actually tell the kernel to initialize the Guest. We
+/*L:185
+ * This is where we actually tell the kernel to initialize the Guest. We
  * saw the arguments it expects when we looked at initialize() in lguest_user.c:
  * the base of Guest "physical" memory, the top physical page to allow and the
- * entry point for the Guest. */
+ * entry point for the Guest.
+ */
 static void tell_kernel(unsigned long start)
 {
         unsigned long args[] = { LHREQ_INITIALIZE,
@@ -511,7 +559,7 @@ static void tell_kernel(unsigned long start)
 }
 /*:*/
 
-/*
+/*L:200
  * Device Handling.
  *
  * When the Guest gives us a buffer, it sends an array of addresses and sizes.
@@ -522,20 +570,26 @@ static void tell_kernel(unsigned long start)
 static void *_check_pointer(unsigned long addr, unsigned int size,
                             unsigned int line)
 {
-        /* We have to separately check addr and addr+size, because size could
-         * be huge and addr + size might wrap around. */
+        /*
+         * We have to separately check addr and addr+size, because size could
+         * be huge and addr + size might wrap around.
+         */
         if (addr >= guest_limit || addr + size >= guest_limit)
                 errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
-        /* We return a pointer for the caller's convenience, now we know it's
-         * safe to use. */
+        /*
+         * We return a pointer for the caller's convenience, now we know it's
+         * safe to use.
+         */
         return from_guest_phys(addr);
 }
 /* A macro which transparently hands the line number to the real function. */
 #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)
 
-/* Each buffer in the virtqueues is actually a chain of descriptors. This
+/*
+ * Each buffer in the virtqueues is actually a chain of descriptors. This
  * function returns the next descriptor in the chain, or vq->vring.num if we're
- * at the end. */
+ * at the end.
+ */
 static unsigned next_desc(struct vring_desc *desc,
                           unsigned int i, unsigned int max)
 {
@@ -556,7 +610,10 @@ static unsigned next_desc(struct vring_desc *desc,
         return next;
 }
 
-/* This actually sends the interrupt for this virtqueue */
+/*
+ * This actually sends the interrupt for this virtqueue, if we've used a
+ * buffer.
+ */
 static void trigger_irq(struct virtqueue *vq)
 {
         unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };
@@ -576,12 +633,14 @@ static void trigger_irq(struct virtqueue *vq)
                 err(1, "Triggering irq %i", vq->config.irq);
 }
 
-/* This looks in the virtqueue and for the first available buffer, and converts
+/*
+ * This looks in the virtqueue for the first available buffer, and converts
  * it to an iovec for convenient access. Since descriptors consist of some
  * number of output then some number of input descriptors, it's actually two
  * iovecs, but we pack them into one and note how many of each there were.
  *
- * This function returns the descriptor number found. */
+ * This function waits if necessary, and returns the descriptor number found.
+ */
 static unsigned wait_for_vq_desc(struct virtqueue *vq,
                                  struct iovec iov[],
                                  unsigned int *out_num, unsigned int *in_num)
@@ -590,17 +649,23 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
         struct vring_desc *desc;
         u16 last_avail = lg_last_avail(vq);
 
+        /* There's nothing available? */
         while (last_avail == vq->vring.avail->idx) {
                 u64 event;
 
-                /* OK, tell Guest about progress up to now. */
+                /*
+                 * Since we're about to sleep, now is a good time to tell the
+                 * Guest about what we've used up to now.
+                 */
                 trigger_irq(vq);
 
                 /* OK, now we need to know about added descriptors. */
                 vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
 
-                /* They could have slipped one in as we were doing that: make
-                 * sure it's written, then check again. */
+                /*
+                 * They could have slipped one in as we were doing that: make
+                 * sure it's written, then check again.
+                 */
                 mb();
                 if (last_avail != vq->vring.avail->idx) {
                         vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
@@ -620,8 +685,10 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
                 errx(1, "Guest moved used index from %u to %u",
                      last_avail, vq->vring.avail->idx);
 
-        /* Grab the next descriptor number they're advertising, and increment
-         * the index we've seen. */
+        /*
+         * Grab the next descriptor number they're advertising, and increment
+         * the index we've seen.
+         */
         head = vq->vring.avail->ring[last_avail % vq->vring.num];
         lg_last_avail(vq)++;
 
@@ -636,8 +703,10 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
         desc = vq->vring.desc;
         i = head;
 
-        /* If this is an indirect entry, then this buffer contains a descriptor
-         * table which we handle as if it's any normal descriptor chain. */
+        /*
+         * If this is an indirect entry, then this buffer contains a descriptor
+         * table which we handle as if it's any normal descriptor chain.
+         */
         if (desc[i].flags & VRING_DESC_F_INDIRECT) {
                 if (desc[i].len % sizeof(struct vring_desc))
                         errx(1, "Invalid size for indirect buffer table");
@@ -656,8 +725,10 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
                 if (desc[i].flags & VRING_DESC_F_WRITE)
                         (*in_num)++;
                 else {
-                        /* If it's an output descriptor, they're all supposed
-                         * to come before any input descriptors. */
+                        /*
+                         * If it's an output descriptor, they're all supposed
+                         * to come before any input descriptors.
+                         */
                         if (*in_num)
                                 errx(1, "Descriptor has out after in");
                         (*out_num)++;
@@ -671,14 +742,19 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
         return head;
 }
 
-/* After we've used one of their buffers, we tell them about it. We'll then
- * want to send them an interrupt, using trigger_irq(). */
+/*
+ * After we've used one of their buffers, we tell the Guest about it. Sometime
+ * later we'll want to send them an interrupt using trigger_irq(); note that
+ * wait_for_vq_desc() does that for us if it has to wait.
+ */
 static void add_used(struct virtqueue *vq, unsigned int head, int len)
 {
         struct vring_used_elem *used;
 
-        /* The virtqueue contains a ring of used buffers. Get a pointer to the
-         * next entry in that used ring. */
+        /*
+         * The virtqueue contains a ring of used buffers. Get a pointer to the
+         * next entry in that used ring.
+         */
         used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
         used->id = head;
         used->len = len;
@@ -698,9 +774,9 @@ static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len)
 /*
  * The Console
  *
- * We associate some data with the console for our exit hack. */
-struct console_abort
-{
+ * We associate some data with the console for our exit hack.
+ */
+struct console_abort {
         /* How many times have they hit ^C? */
         int count;
         /* When did they start? */
@@ -715,30 +791,35 @@ static void console_input(struct virtqueue *vq)
         struct console_abort *abort = vq->dev->priv;
         struct iovec iov[vq->vring.num];
 
-        /* Make sure there's a descriptor waiting. */
+        /* Make sure there's a descriptor available. */
         head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
         if (out_num)
                 errx(1, "Output buffers in console in queue?");
 
-        /* Read it in. */
+        /* Read into it. This is where we usually wait. */
         len = readv(STDIN_FILENO, iov, in_num);
         if (len <= 0) {
                 /* Ran out of input? */
                 warnx("Failed to get console input, ignoring console.");
-                /* For simplicity, dying threads kill the whole Launcher. So
-                 * just nap here. */
+                /*
+                 * For simplicity, dying threads kill the whole Launcher. So
+                 * just nap here.
+                 */
                 for (;;)
                         pause();
         }
 
+        /* Tell the Guest we used a buffer. */
         add_used_and_trigger(vq, head, len);
 
-        /* Three ^C within one second? Exit.
+        /*
+         * Three ^C within one second? Exit.
          *
          * This is such a hack, but works surprisingly well. Each ^C has to
         * be in a buffer by itself, so they can't be too fast. But we check
          * that we get three within about a second, so they can't be too
-         * slow. */
+         * slow.
+         */
         if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) {
                 abort->count = 0;
                 return;
@@ -763,15 +844,23 @@ static void console_output(struct virtqueue *vq)
         unsigned int head, out, in;
         struct iovec iov[vq->vring.num];
 
+        /* We usually wait in here, for the Guest to give us something. */
         head = wait_for_vq_desc(vq, iov, &out, &in);
         if (in)
                 errx(1, "Input buffers in console output queue?");
+
+        /* writev can return a partial write, so we loop here. */
         while (!iov_empty(iov, out)) {
                 int len = writev(STDOUT_FILENO, iov, out);
                 if (len <= 0)
                         err(1, "Write to stdout gave %i", len);
                 iov_consume(iov, out, len);
         }
+
+        /*
+         * We're finished with that buffer: if we're going to sleep,
+         * wait_for_vq_desc() will prod the Guest with an interrupt.
+         */
         add_used(vq, head, 0);
 }
 
@@ -791,15 +880,30 @@ static void net_output(struct virtqueue *vq)
         unsigned int head, out, in;
         struct iovec iov[vq->vring.num];
 
+        /* We usually wait in here for the Guest to give us a packet. */
         head = wait_for_vq_desc(vq, iov, &out, &in);
         if (in)
                 errx(1, "Input buffers in net output queue?");
+        /*
+         * Send the whole thing through to /dev/net/tun. It expects the exact
+         * same format: what a coincidence!
+         */
         if (writev(net_info->tunfd, iov, out) < 0)
                 errx(1, "Write to tun failed?");
+
+        /*
+         * Done with that one; wait_for_vq_desc() will send the interrupt if
+         * all packets are processed.
+         */
         add_used(vq, head, 0);
 }
 
-/* Will reading from this file descriptor block? */
+/*
+ * Handling network input is a bit trickier, because I've tried to optimize it.
+ *
+ * First we have a helper routine which tells is if from this file descriptor
+ * (ie. the /dev/net/tun device) will block:
+ */
 static bool will_block(int fd)
 {
         fd_set fdset;
@@ -809,8 +913,11 @@ static bool will_block(int fd)
         return select(fd+1, &fdset, NULL, NULL, &zero) != 1;
 }
 
-/* This is where we handle packets coming in from the tun device to our
- * Guest. */
+/*
+ * This handles packets coming in from the tun device to our Guest. Like all
+ * service routines, it gets called again as soon as it returns, so you don't
+ * see a while(1) loop here.
+ */
 static void net_input(struct virtqueue *vq)
 {
         int len;
@@ -818,21 +925,38 @@ static void net_input(struct virtqueue *vq)
         struct iovec iov[vq->vring.num];
         struct net_info *net_info = vq->dev->priv;
 
+        /*
+         * Get a descriptor to write an incoming packet into. This will also
+         * send an interrupt if they're out of descriptors.
+         */
         head = wait_for_vq_desc(vq, iov, &out, &in);
         if (out)
                 errx(1, "Output buffers in net input queue?");
 
-        /* Deliver interrupt now, since we're about to sleep. */
+        /*
+         * If it looks like we'll block reading from the tun device, send them
+         * an interrupt.
+         */
         if (vq->pending_used && will_block(net_info->tunfd))
                 trigger_irq(vq);
 
+        /*
+         * Read in the packet. This is where we normally wait (when there's no
+         * incoming network traffic).
+         */
         len = readv(net_info->tunfd, iov, in);
         if (len <= 0)
                 err(1, "Failed to read from tun.");
+
+        /*
+         * Mark that packet buffer as used, but don't interrupt here. We want
+         * to wait until we've done as much work as we can.
+         */
         add_used(vq, head, len);
 }
+/*:*/
 
-/* This is the helper to create threads. */
+/* This is the helper to create threads: run the service routine in a loop. */
 static int do_thread(void *_vq)
 {
         struct virtqueue *vq = _vq;
@@ -842,8 +966,10 @@ static int do_thread(void *_vq)
         return 0;
 }
 
-/* When a child dies, we kill our entire process group with SIGTERM. This
- * also has the side effect that the shell restores the console for us! */
+/*
+ * When a child dies, we kill our entire process group with SIGTERM. This
+ * also has the side effect that the shell restores the console for us!
+ */
 static void kill_launcher(int signal)
 {
         kill(0, SIGTERM);
@@ -878,11 +1004,15 @@ static void reset_device(struct device *dev)
         signal(SIGCHLD, (void *)kill_launcher);
 }
 
+/*L:216
+ * This actually creates the thread which services the virtqueue for a device.
+ */
 static void create_thread(struct virtqueue *vq)
 {
-        /* Create stack for thread and run it. Since stack grows
-         * upwards, we point the stack pointer to the end of this
-         * region. */
+        /*
+         * Create stack for thread. Since the stack grows upwards, we point
+         * the stack pointer to the end of this region.
+         */
         char *stack = malloc(32768);
         unsigned long args[] = { LHREQ_EVENTFD,
                                  vq->config.pfn*getpagesize(), 0 };
@@ -893,17 +1023,22 @@ static void create_thread(struct virtqueue *vq)
                 err(1, "Creating eventfd");
         args[2] = vq->eventfd;
 
-        /* Attach an eventfd to this virtqueue: it will go off
-         * when the Guest does an LHCALL_NOTIFY for this vq. */
+        /*
+         * Attach an eventfd to this virtqueue: it will go off when the Guest
+         * does an LHCALL_NOTIFY for this vq.
+         */
         if (write(lguest_fd, &args, sizeof(args)) != 0)
                 err(1, "Attaching eventfd");
 
-        /* CLONE_VM: because it has to access the Guest memory, and
-         * SIGCHLD so we get a signal if it dies. */
+        /*
+         * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
+         * we get a signal if it dies.
+         */
         vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
         if (vq->thread == (pid_t)-1)
                 err(1, "Creating clone");
-        /* We close our local copy, now the child has it. */
+
+        /* We close our local copy now the child has it. */
         close(vq->eventfd);
 }
 
@@ -955,7 +1090,10 @@ static void update_device_status(struct device *dev)
         }
 }
 
-/* This is the generic routine we call when the Guest uses LHCALL_NOTIFY. */
+/*L:215
+ * This is the generic routine we call when the Guest uses LHCALL_NOTIFY. In
+ * particular, it's used to notify us of device status changes during boot.
+ */
 static void handle_output(unsigned long addr)
 {
         struct device *i;
@@ -964,25 +1102,42 @@ static void handle_output(unsigned long addr)
         for (i = devices.dev; i; i = i->next) {
                 struct virtqueue *vq;
 
-                /* Notifications to device descriptors update device status. */
+                /*
+                 * Notifications to device descriptors mean they updated the
+                 * device status.
+                 */
                 if (from_guest_phys(addr) == i->desc) {
                         update_device_status(i);
                         return;
                 }
 
-                /* Devices *can* be used before status is set to DRIVER_OK. */
+                /*
+                 * Devices *can* be used before status is set to DRIVER_OK.
+                 * The original plan was that they would never do this: they
+                 * would always finish setting up their status bits before
+                 * actually touching the virtqueues. In practice, we allowed
+                 * them to, and they do (eg. the disk probes for partition
+                 * tables as part of initialization).
+                 *
+                 * If we see this, we start the device: once it's running, we
+                 * expect the device to catch all the notifications.
+                 */
                 for (vq = i->vq; vq; vq = vq->next) {
                         if (addr != vq->config.pfn*getpagesize())
                                 continue;
                         if (i->running)
                                 errx(1, "Notification on running %s", i->name);
+                        /* This just calls create_thread() for each virtqueue */
                         start_device(i);
                         return;
                 }
         }
 
-        /* Early console write is done using notify on a nul-terminated string
-         * in Guest memory. */
+        /*
+         * Early console write is done using notify on a nul-terminated string
+         * in Guest memory. It's also great for hacking debugging messages
+         * into a Guest.
+         */
         if (addr >= guest_limit)
                 errx(1, "Bad NOTIFY %#lx", addr);
 
@@ -998,10 +1153,12 @@ static void handle_output(unsigned long addr) | |||
998 | * routines to allocate and manage them. | 1153 | * routines to allocate and manage them. |
999 | */ | 1154 | */ |
1000 | 1155 | ||
1001 | /* The layout of the device page is a "struct lguest_device_desc" followed by a | 1156 | /* |
1157 | * The layout of the device page is a "struct lguest_device_desc" followed by a | ||
1002 | * number of virtqueue descriptors, then two sets of feature bits, then an | 1158 | * number of virtqueue descriptors, then two sets of feature bits, then an |
1003 | * array of configuration bytes. This routine returns the configuration | 1159 | * array of configuration bytes. This routine returns the configuration |
1004 | * pointer. */ | 1160 | * pointer. |
1161 | */ | ||
1005 | static u8 *device_config(const struct device *dev) | 1162 | static u8 *device_config(const struct device *dev) |
1006 | { | 1163 | { |
1007 | return (void *)(dev->desc + 1) | 1164 | return (void *)(dev->desc + 1) |
@@ -1009,9 +1166,11 @@ static u8 *device_config(const struct device *dev) | |||
1009 | + dev->feature_len * 2; | 1166 | + dev->feature_len * 2; |
1010 | } | 1167 | } |
1011 | 1168 | ||
1012 | /* This routine allocates a new "struct lguest_device_desc" from descriptor | 1169 | /* |
1170 | * This routine allocates a new "struct lguest_device_desc" from descriptor | ||
1013 | * table page just above the Guest's normal memory. It returns a pointer to | 1171 | * table page just above the Guest's normal memory. It returns a pointer to |
1014 | * that descriptor. */ | 1172 | * that descriptor. |
1173 | */ | ||
1015 | static struct lguest_device_desc *new_dev_desc(u16 type) | 1174 | static struct lguest_device_desc *new_dev_desc(u16 type) |
1016 | { | 1175 | { |
1017 | struct lguest_device_desc d = { .type = type }; | 1176 | struct lguest_device_desc d = { .type = type }; |
@@ -1032,8 +1191,10 @@ static struct lguest_device_desc *new_dev_desc(u16 type) | |||
1032 | return memcpy(p, &d, sizeof(d)); | 1191 | return memcpy(p, &d, sizeof(d)); |
1033 | } | 1192 | } |
1034 | 1193 | ||
1035 | /* Each device descriptor is followed by the description of its virtqueues. We | 1194 | /* |
1036 | * specify how many descriptors the virtqueue is to have. */ | 1195 | * Each device descriptor is followed by the description of its virtqueues. We |
1196 | * specify how many descriptors the virtqueue is to have. | ||
1197 | */ | ||
1037 | static void add_virtqueue(struct device *dev, unsigned int num_descs, | 1198 | static void add_virtqueue(struct device *dev, unsigned int num_descs, |
1038 | void (*service)(struct virtqueue *)) | 1199 | void (*service)(struct virtqueue *)) |
1039 | { | 1200 | { |
@@ -1050,6 +1211,11 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
1050 | vq->next = NULL; | 1211 | vq->next = NULL; |
1051 | vq->last_avail_idx = 0; | 1212 | vq->last_avail_idx = 0; |
1052 | vq->dev = dev; | 1213 | vq->dev = dev; |
1214 | |||
1215 | /* | ||
1216 | * This is the routine the service thread will run, and its Process ID | ||
1217 | * once it's running. | ||
1218 | */ | ||
1053 | vq->service = service; | 1219 | vq->service = service; |
1054 | vq->thread = (pid_t)-1; | 1220 | vq->thread = (pid_t)-1; |
1055 | 1221 | ||
@@ -1061,10 +1227,12 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
1061 | /* Initialize the vring. */ | 1227 | /* Initialize the vring. */ |
1062 | vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN); | 1228 | vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN); |
1063 | 1229 | ||
1064 | /* Append virtqueue to this device's descriptor. We use | 1230 | /* |
1231 | * Append virtqueue to this device's descriptor. We use | ||
1065 | * device_config() to get the end of the device's current virtqueues; | 1232 | * device_config() to get the end of the device's current virtqueues; |
1066 | * we check that we haven't added any config or feature information | 1233 | * we check that we haven't added any config or feature information |
1067 | * yet, otherwise we'd be overwriting them. */ | 1234 | * yet, otherwise we'd be overwriting them. |
1235 | */ | ||
1068 | assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); | 1236 | assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); |
1069 | memcpy(device_config(dev), &vq->config, sizeof(vq->config)); | 1237 | memcpy(device_config(dev), &vq->config, sizeof(vq->config)); |
1070 | dev->num_vq++; | 1238 | dev->num_vq++; |
@@ -1072,14 +1240,18 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
1072 | 1240 | ||
1073 | verbose("Virtqueue page %#lx\n", to_guest_phys(p)); | 1241 | verbose("Virtqueue page %#lx\n", to_guest_phys(p)); |
1074 | 1242 | ||
1075 | /* Add to tail of list, so dev->vq is first vq, dev->vq->next is | 1243 | /* |
1076 | * second. */ | 1244 | * Add to tail of list, so dev->vq is first vq, dev->vq->next is |
1245 | * second. | ||
1246 | */ | ||
1077 | for (i = &dev->vq; *i; i = &(*i)->next); | 1247 | for (i = &dev->vq; *i; i = &(*i)->next); |
1078 | *i = vq; | 1248 | *i = vq; |
1079 | } | 1249 | } |
1080 | 1250 | ||
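That one-line for loop is the classic pointer-to-pointer idiom for appending to a singly linked list. The same code, annotated:

    struct virtqueue **i;

    /*
     * i points at a pointer: first &dev->vq, then each &(*i)->next.
     * Walking until *i is NULL finds the tail slot, and the empty
     * list needs no special case at all.
     */
    for (i = &dev->vq; *i; i = &(*i)->next)
            /* nothing */;
    *i = vq;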
1081 | /* The first half of the feature bitmask is for us to advertise features. The | 1251 | /* |
1082 | * second half is for the Guest to accept features. */ | 1252 | * The first half of the feature bitmask is for us to advertise features. The |
1253 | * second half is for the Guest to accept features. | ||
1254 | */ | ||
1083 | static void add_feature(struct device *dev, unsigned bit) | 1255 | static void add_feature(struct device *dev, unsigned bit) |
1084 | { | 1256 | { |
1085 | u8 *features = get_feature_bits(dev); | 1257 | u8 *features = get_feature_bits(dev); |
@@ -1093,9 +1265,11 @@ static void add_feature(struct device *dev, unsigned bit) | |||
1093 | features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); | 1265 | features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); |
1094 | } | 1266 | } |
1095 | 1267 | ||
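Since the Guest's acknowledgements live in the second half of that bitmap, reading one back is the mirror image of the write above. A hypothetical helper (guest_acked() is not in the launcher; it only illustrates the layout):

    static bool guest_acked(const struct device *dev, unsigned bit)
    {
            /* The Guest's half starts feature_len bytes after ours. */
            const u8 *acks = get_feature_bits(dev) + dev->desc->feature_len;

            return acks[bit / CHAR_BIT] & (1 << (bit % CHAR_BIT));
    }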
1096 | /* This routine sets the configuration fields for an existing device's | 1268 | /* |
1269 | * This routine sets the configuration fields for an existing device's | ||
1097 | * descriptor. It only works for the last device, but that's OK because that's | 1270 | * descriptor. It only works for the last device, but that's OK because that's |
1098 | * how we use it. */ | 1271 | * how we use it. |
1272 | */ | ||
1099 | static void set_config(struct device *dev, unsigned len, const void *conf) | 1273 | static void set_config(struct device *dev, unsigned len, const void *conf) |
1100 | { | 1274 | { |
1101 | /* Check we haven't overflowed our single page. */ | 1275 | /* Check we haven't overflowed our single page. */ |
@@ -1105,12 +1279,18 @@ static void set_config(struct device *dev, unsigned len, const void *conf) | |||
1105 | /* Copy in the config information, and store the length. */ | 1279 | /* Copy in the config information, and store the length. */ |
1106 | memcpy(device_config(dev), conf, len); | 1280 | memcpy(device_config(dev), conf, len); |
1107 | dev->desc->config_len = len; | 1281 | dev->desc->config_len = len; |
1282 | |||
1283 | /* Size must fit in config_len field (8 bits)! */ | ||
1284 | assert(dev->desc->config_len == len); | ||
1108 | } | 1285 | } |
1109 | 1286 | ||
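The assert works because config_len is a single byte: storing 256 or more truncates, so the read-back comparison fails. In miniature:

    u8 config_len = 300;            /* truncates: stores 300 % 256 == 44 */
    assert(config_len == 300);      /* fires, because 44 != 300 */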
1110 | /* This routine does all the creation and setup of a new device, including | 1287 | /* |
1111 | * calling new_dev_desc() to allocate the descriptor and device memory. | 1288 | * This routine does all the creation and setup of a new device, including |
1289 | * calling new_dev_desc() to allocate the descriptor and device memory. We | ||
1290 | * don't actually start the service threads until later. | ||
1112 | * | 1291 | * |
1113 | * See what I mean about userspace being boring? */ | 1292 | * See what I mean about userspace being boring? |
1293 | */ | ||
1114 | static struct device *new_device(const char *name, u16 type) | 1294 | static struct device *new_device(const char *name, u16 type) |
1115 | { | 1295 | { |
1116 | struct device *dev = malloc(sizeof(*dev)); | 1296 | struct device *dev = malloc(sizeof(*dev)); |
@@ -1123,10 +1303,12 @@ static struct device *new_device(const char *name, u16 type) | |||
1123 | dev->num_vq = 0; | 1303 | dev->num_vq = 0; |
1124 | dev->running = false; | 1304 | dev->running = false; |
1125 | 1305 | ||
1126 | /* Append to device list. Prepending to a single-linked list is | 1306 | /* |
1307 | * Append to device list. Prepending to a single-linked list is | ||
1127 | * easier, but the user expects the devices to be arranged on the bus | 1308 | * easier, but the user expects the devices to be arranged on the bus |
1128 | * in command-line order. The first network device on the command line | 1309 | * in command-line order. The first network device on the command line |
1129 | * is eth0, the first block device /dev/vda, etc. */ | 1310 | * is eth0, the first block device /dev/vda, etc. |
1311 | */ | ||
1130 | if (devices.lastdev) | 1312 | if (devices.lastdev) |
1131 | devices.lastdev->next = dev; | 1313 | devices.lastdev->next = dev; |
1132 | else | 1314 | else |
@@ -1136,8 +1318,10 @@ static struct device *new_device(const char *name, u16 type) | |||
1136 | return dev; | 1318 | return dev; |
1137 | } | 1319 | } |
1138 | 1320 | ||
1139 | /* Our first setup routine is the console. It's a fairly simple device, but | 1321 | /* |
1140 | * UNIX tty handling makes it uglier than it could be. */ | 1322 | * Our first setup routine is the console. It's a fairly simple device, but |
1323 | * UNIX tty handling makes it uglier than it could be. | ||
1324 | */ | ||
1141 | static void setup_console(void) | 1325 | static void setup_console(void) |
1142 | { | 1326 | { |
1143 | struct device *dev; | 1327 | struct device *dev; |
@@ -1145,8 +1329,10 @@ static void setup_console(void) | |||
1145 | /* If we can save the initial standard input settings... */ | 1329 | /* If we can save the initial standard input settings... */ |
1146 | if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { | 1330 | if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { |
1147 | struct termios term = orig_term; | 1331 | struct termios term = orig_term; |
1148 | /* Then we turn off echo, line buffering and ^C etc. We want a | 1332 | /* |
1149 | * raw input stream to the Guest. */ | 1333 | * Then we turn off echo, line buffering and ^C etc: We want a |
1334 | * raw input stream to the Guest. | ||
1335 | */ | ||
1150 | term.c_lflag &= ~(ISIG|ICANON|ECHO); | 1336 | term.c_lflag &= ~(ISIG|ICANON|ECHO); |
1151 | tcsetattr(STDIN_FILENO, TCSANOW, &term); | 1337 | tcsetattr(STDIN_FILENO, TCSANOW, &term); |
1152 | } | 1338 | } |
@@ -1157,10 +1343,12 @@ static void setup_console(void) | |||
1157 | dev->priv = malloc(sizeof(struct console_abort)); | 1343 | dev->priv = malloc(sizeof(struct console_abort)); |
1158 | ((struct console_abort *)dev->priv)->count = 0; | 1344 | ((struct console_abort *)dev->priv)->count = 0; |
1159 | 1345 | ||
1160 | /* The console needs two virtqueues: the input then the output. When | 1346 | /* |
1347 | * The console needs two virtqueues: the input then the output. When | ||
1161 | * they put something in the input queue, we make sure we're listening to | 1348 | * they put something in the input queue, we make sure we're listening to |
1162 | * stdin. When they put something in the output queue, we write it to | 1349 | * stdin. When they put something in the output queue, we write it to |
1163 | * stdout. */ | 1350 | * stdout. |
1351 | */ | ||
1164 | add_virtqueue(dev, VIRTQUEUE_NUM, console_input); | 1352 | add_virtqueue(dev, VIRTQUEUE_NUM, console_input); |
1165 | add_virtqueue(dev, VIRTQUEUE_NUM, console_output); | 1353 | add_virtqueue(dev, VIRTQUEUE_NUM, console_output); |
1166 | 1354 | ||
@@ -1168,7 +1356,8 @@ static void setup_console(void) | |||
1168 | } | 1356 | } |
1169 | /*:*/ | 1357 | /*:*/ |
1170 | 1358 | ||
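For anyone who hasn't fought UNIX tty handling before, the pattern here is save, modify, restore. A self-contained sketch of the termios dance (the restore half runs from the launcher's exit paths):

    #include <termios.h>
    #include <unistd.h>

    static struct termios orig_term;

    static void make_stdin_raw(void)
    {
            struct termios term;

            if (tcgetattr(STDIN_FILENO, &orig_term) != 0)
                    return;         /* stdin isn't a tty: nothing to do */

            term = orig_term;
            /* No ^C signals, no line buffering, no echo. */
            term.c_lflag &= ~(ISIG | ICANON | ECHO);
            tcsetattr(STDIN_FILENO, TCSANOW, &term);
    }

    static void restore_stdin(void)
    {
            tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
    }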
1171 | /*M:010 Inter-guest networking is an interesting area. Simplest is to have a | 1359 | /*M:010 |
1360 | * Inter-guest networking is an interesting area. Simplest is to have a | ||
1172 | * --sharenet=<name> option which opens or creates a named pipe. This can be | 1361 | * --sharenet=<name> option which opens or creates a named pipe. This can be |
1173 | * used to send packets to another guest in a 1:1 manner. | 1362 | * used to send packets to another guest in a 1:1 manner. |
1174 | * | 1363 | * |
@@ -1182,7 +1371,8 @@ static void setup_console(void) | |||
1182 | * multiple inter-guest channels behind one interface, although it would | 1371 | * multiple inter-guest channels behind one interface, although it would |
1183 | * require some manner of hotplugging new virtio channels. | 1372 | * require some manner of hotplugging new virtio channels. |
1184 | * | 1373 | * |
1185 | * Finally, we could implement a virtio network switch in the kernel. :*/ | 1374 | * Finally, we could implement a virtio network switch in the kernel. |
1375 | :*/ | ||
1186 | 1376 | ||
1187 | static u32 str2ip(const char *ipaddr) | 1377 | static u32 str2ip(const char *ipaddr) |
1188 | { | 1378 | { |
@@ -1207,11 +1397,13 @@ static void str2mac(const char *macaddr, unsigned char mac[6]) | |||
1207 | mac[5] = m[5]; | 1397 | mac[5] = m[5]; |
1208 | } | 1398 | } |
1209 | 1399 | ||
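str2ip()'s body falls outside these hunks. A plausible sketch of the conversion it performs (dotted quad to host-order u32), not necessarily the launcher's exact code:

    static u32 str2ip(const char *ipaddr)
    {
            unsigned int b[4];

            if (sscanf(ipaddr, "%u.%u.%u.%u", &b[0], &b[1], &b[2], &b[3]) != 4)
                    errx(1, "Failed to parse IP address '%s'", ipaddr);
            return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
    }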
1210 | /* This code is "adapted" from libbridge: it attaches the Host end of the | 1400 | /* |
1401 | * This code is "adapted" from libbridge: it attaches the Host end of the | ||
1211 | * network device to the bridge device specified by the command line. | 1402 | * network device to the bridge device specified by the command line. |
1212 | * | 1403 | * |
1213 | * This is yet another James Morris contribution (I'm an IP-level guy, so I | 1404 | * This is yet another James Morris contribution (I'm an IP-level guy, so I |
1214 | * dislike bridging), and I just try not to break it. */ | 1405 | * dislike bridging), and I just try not to break it. |
1406 | */ | ||
1215 | static void add_to_bridge(int fd, const char *if_name, const char *br_name) | 1407 | static void add_to_bridge(int fd, const char *if_name, const char *br_name) |
1216 | { | 1408 | { |
1217 | int ifidx; | 1409 | int ifidx; |
@@ -1231,9 +1423,11 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name) | |||
1231 | err(1, "can't add %s to bridge %s", if_name, br_name); | 1423 | err(1, "can't add %s to bridge %s", if_name, br_name); |
1232 | } | 1424 | } |
1233 | 1425 | ||
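The libbridge-derived body is elided by the hunk; the heart of it is a single SIOCBRADDIF ioctl issued against the bridge's name. A hedged reconstruction:

    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>      /* SIOCBRADDIF */

    static void add_to_bridge(int fd, const char *if_name, const char *br_name)
    {
            int ifidx = if_nametoindex(if_name);
            struct ifreq ifr;

            if (!ifidx)
                    errx(1, "interface %s does not exist!", if_name);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
            ifr.ifr_name[IFNAMSIZ - 1] = '\0';
            ifr.ifr_ifindex = ifidx;
            if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
                    err(1, "can't add %s to bridge %s", if_name, br_name);
    }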
1234 | /* This sets up the Host end of the network device with an IP address, brings | 1426 | /* |
1427 | * This sets up the Host end of the network device with an IP address, brings | ||
1235 | * it up so packets will flow, then copies the MAC address into the hwaddr | 1428 | * it up so packets will flow, then copies the MAC address into the hwaddr |
1236 | * pointer. */ | 1429 | * pointer. |
1430 | */ | ||
1237 | static void configure_device(int fd, const char *tapif, u32 ipaddr) | 1431 | static void configure_device(int fd, const char *tapif, u32 ipaddr) |
1238 | { | 1432 | { |
1239 | struct ifreq ifr; | 1433 | struct ifreq ifr; |
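The rest of configure_device() also falls past the hunk. Bringing up an interface from userspace is two ioctls on any old socket; a sketch matching the signature above (the hwaddr copy the comment mentions has no parameter here, so the sketch leaves it out):

    static void configure_device(int fd, const char *tapif, u32 ipaddr)
    {
            struct ifreq ifr;
            struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;

            memset(&ifr, 0, sizeof(ifr));
            strcpy(ifr.ifr_name, tapif);

            /* Set the IP address. */
            sin->sin_family = AF_INET;
            sin->sin_addr.s_addr = htonl(ipaddr);
            if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
                    err(1, "Setting %s interface address", tapif);

            /* Bring it up so packets will flow. */
            ifr.ifr_flags = IFF_UP;
            if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
                    err(1, "Bringing interface %s up", tapif);
    }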
@@ -1260,10 +1454,12 @@ static int get_tun_device(char tapif[IFNAMSIZ]) | |||
1260 | /* Start with this zeroed. Messy but sure. */ | 1454 | /* Start with this zeroed. Messy but sure. */ |
1261 | memset(&ifr, 0, sizeof(ifr)); | 1455 | memset(&ifr, 0, sizeof(ifr)); |
1262 | 1456 | ||
1263 | /* We open the /dev/net/tun device and tell it we want a tap device. A | 1457 | /* |
1458 | * We open the /dev/net/tun device and tell it we want a tap device. A | ||
1264 | * tap device is like a tun device, only somehow different. To tell | 1459 | * tap device is like a tun device, only somehow different. To tell |
1265 | * the truth, I completely blundered my way through this code, but it | 1460 | * the truth, I completely blundered my way through this code, but it |
1266 | * works now! */ | 1461 | * works now! |
1462 | */ | ||
1267 | netfd = open_or_die("/dev/net/tun", O_RDWR); | 1463 | netfd = open_or_die("/dev/net/tun", O_RDWR); |
1268 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR; | 1464 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR; |
1269 | strcpy(ifr.ifr_name, "tap%d"); | 1465 | strcpy(ifr.ifr_name, "tap%d"); |
@@ -1274,18 +1470,22 @@ static int get_tun_device(char tapif[IFNAMSIZ]) | |||
1274 | TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0) | 1470 | TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0) |
1275 | err(1, "Could not set features for tun device"); | 1471 | err(1, "Could not set features for tun device"); |
1276 | 1472 | ||
1277 | /* We don't need checksums calculated for packets coming in this | 1473 | /* |
1278 | * device: trust us! */ | 1474 | * We don't need checksums calculated for packets coming in this |
1475 | * device: trust us! | ||
1476 | */ | ||
1279 | ioctl(netfd, TUNSETNOCSUM, 1); | 1477 | ioctl(netfd, TUNSETNOCSUM, 1); |
1280 | 1478 | ||
1281 | memcpy(tapif, ifr.ifr_name, IFNAMSIZ); | 1479 | memcpy(tapif, ifr.ifr_name, IFNAMSIZ); |
1282 | return netfd; | 1480 | return netfd; |
1283 | } | 1481 | } |
1284 | 1482 | ||
1285 | /*L:195 Our network is a Host<->Guest network. This can either use bridging or | 1483 | /*L:195 |
1484 | * Our network is a Host<->Guest network. This can either use bridging or | ||
1286 | * routing, but the principle is the same: it uses the "tun" device to inject | 1485 | * routing, but the principle is the same: it uses the "tun" device to inject |
1287 | * packets into the Host as if they came in from a normal network card. We | 1486 | * packets into the Host as if they came in from a normal network card. We |
1288 | * just shunt packets between the Guest and the tun device. */ | 1487 | * just shunt packets between the Guest and the tun device. |
1488 | */ | ||
1289 | static void setup_tun_net(char *arg) | 1489 | static void setup_tun_net(char *arg) |
1290 | { | 1490 | { |
1291 | struct device *dev; | 1491 | struct device *dev; |
@@ -1302,13 +1502,14 @@ static void setup_tun_net(char *arg) | |||
1302 | dev = new_device("net", VIRTIO_ID_NET); | 1502 | dev = new_device("net", VIRTIO_ID_NET); |
1303 | dev->priv = net_info; | 1503 | dev->priv = net_info; |
1304 | 1504 | ||
1305 | /* Network devices need a receive and a send queue, just like | 1505 | /* Network devices need a recv and a send queue, just like console. */ |
1306 | * console. */ | ||
1307 | add_virtqueue(dev, VIRTQUEUE_NUM, net_input); | 1506 | add_virtqueue(dev, VIRTQUEUE_NUM, net_input); |
1308 | add_virtqueue(dev, VIRTQUEUE_NUM, net_output); | 1507 | add_virtqueue(dev, VIRTQUEUE_NUM, net_output); |
1309 | 1508 | ||
1310 | /* We need a socket to perform the magic network ioctls to bring up the | 1509 | /* |
1311 | * tap interface, connect to the bridge etc. Any socket will do! */ | 1510 | * We need a socket to perform the magic network ioctls to bring up the |
1511 | * tap interface, connect to the bridge etc. Any socket will do! | ||
1512 | */ | ||
1312 | ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); | 1513 | ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); |
1313 | if (ipfd < 0) | 1514 | if (ipfd < 0) |
1314 | err(1, "opening IP socket"); | 1515 | err(1, "opening IP socket"); |
@@ -1362,39 +1563,31 @@ static void setup_tun_net(char *arg) | |||
1362 | verbose("device %u: tun %s: %s\n", | 1563 | verbose("device %u: tun %s: %s\n", |
1363 | devices.device_num, tapif, arg); | 1564 | devices.device_num, tapif, arg); |
1364 | } | 1565 | } |
1365 | 1566 | /*:*/ | |
1366 | /* Our block (disk) device should be really simple: the Guest asks for a block | ||
1367 | * number and we read or write that position in the file. Unfortunately, that | ||
1368 | * was amazingly slow: the Guest waits until the read is finished before | ||
1369 | * running anything else, even if it could have been doing useful work. | ||
1370 | * | ||
1371 | * We could use async I/O, except it's reputed to suck so hard that characters | ||
1372 | * actually go missing from your code when you try to use it. | ||
1373 | * | ||
1374 | * So we farm the I/O out to thread, and communicate with it via a pipe. */ | ||
1375 | 1567 | ||
1376 | /* This hangs off device->priv. */ | 1568 | /* This hangs off device->priv. */ |
1377 | struct vblk_info | 1569 | struct vblk_info { |
1378 | { | ||
1379 | /* The size of the file. */ | 1570 | /* The size of the file. */ |
1380 | off64_t len; | 1571 | off64_t len; |
1381 | 1572 | ||
1382 | /* The file descriptor for the file. */ | 1573 | /* The file descriptor for the file. */ |
1383 | int fd; | 1574 | int fd; |
1384 | 1575 | ||
1385 | /* IO thread listens on this file descriptor [0]. */ | ||
1386 | int workpipe[2]; | ||
1387 | |||
1388 | /* IO thread writes to this file descriptor to mark it done, then | ||
1389 | * Launcher triggers interrupt to Guest. */ | ||
1390 | int done_fd; | ||
1391 | }; | 1576 | }; |
1392 | 1577 | ||
1393 | /*L:210 | 1578 | /*L:210 |
1394 | * The Disk | 1579 | * The Disk |
1395 | * | 1580 | * |
1396 | * Remember that the block device is handled by a separate I/O thread. We head | 1581 | * The disk only has one virtqueue, so it only has one thread. It is really |
1397 | * straight into the core of that thread here: | 1582 | * simple: the Guest asks for a block number and we read or write that position |
1583 | * in the file. | ||
1584 | * | ||
1585 | * Before we serviced each virtqueue in a separate thread, that was unacceptably | ||
1586 | * slow: the Guest waits until the read is finished before running anything | ||
1587 | * else, even if it could have been doing useful work. | ||
1588 | * | ||
1589 | * We could have used async I/O, except it's reputed to suck so hard that | ||
1590 | * characters actually go missing from your code when you try to use it. | ||
1398 | */ | 1591 | */ |
1399 | static void blk_request(struct virtqueue *vq) | 1592 | static void blk_request(struct virtqueue *vq) |
1400 | { | 1593 | { |
@@ -1406,47 +1599,64 @@ static void blk_request(struct virtqueue *vq) | |||
1406 | struct iovec iov[vq->vring.num]; | 1599 | struct iovec iov[vq->vring.num]; |
1407 | off64_t off; | 1600 | off64_t off; |
1408 | 1601 | ||
1409 | /* Get the next request. */ | 1602 | /* |
1603 | * Get the next request, where we normally wait. It triggers the | ||
1604 | * interrupt to acknowledge previously serviced requests (if any). | ||
1605 | */ | ||
1410 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); | 1606 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); |
1411 | 1607 | ||
1412 | /* Every block request should contain at least one output buffer | 1608 | /* |
1609 | * Every block request should contain at least one output buffer | ||
1413 | * (detailing the location on disk and the type of request) and one | 1610 | * (detailing the location on disk and the type of request) and one |
1414 | * input buffer (to hold the result). */ | 1611 | * input buffer (to hold the result). |
1612 | */ | ||
1415 | if (out_num == 0 || in_num == 0) | 1613 | if (out_num == 0 || in_num == 0) |
1416 | errx(1, "Bad virtblk cmd %u out=%u in=%u", | 1614 | errx(1, "Bad virtblk cmd %u out=%u in=%u", |
1417 | head, out_num, in_num); | 1615 | head, out_num, in_num); |
1418 | 1616 | ||
1419 | out = convert(&iov[0], struct virtio_blk_outhdr); | 1617 | out = convert(&iov[0], struct virtio_blk_outhdr); |
1420 | in = convert(&iov[out_num+in_num-1], u8); | 1618 | in = convert(&iov[out_num+in_num-1], u8); |
1619 | /* | ||
1620 | * For historical reasons, block operations are expressed in 512 byte | ||
1621 | * "sectors". | ||
1622 | */ | ||
1421 | off = out->sector * 512; | 1623 | off = out->sector * 512; |
1422 | 1624 | ||
1423 | /* The block device implements "barriers", where the Guest indicates | 1625 | /* |
1626 | * The block device implements "barriers", where the Guest indicates | ||
1424 | * that it wants all previous writes to occur before this write. We | 1627 | * that it wants all previous writes to occur before this write. We |
1425 | * don't have a way of asking our kernel to do a barrier, so we just | 1628 | * don't have a way of asking our kernel to do a barrier, so we just |
1426 | * synchronize all the data in the file. Pretty poor, no? */ | 1629 | * synchronize all the data in the file. Pretty poor, no? |
1630 | */ | ||
1427 | if (out->type & VIRTIO_BLK_T_BARRIER) | 1631 | if (out->type & VIRTIO_BLK_T_BARRIER) |
1428 | fdatasync(vblk->fd); | 1632 | fdatasync(vblk->fd); |
1429 | 1633 | ||
1430 | /* In general the virtio block driver is allowed to try SCSI commands. | 1634 | /* |
1431 | * It'd be nice if we supported eject, for example, but we don't. */ | 1635 | * In general the virtio block driver is allowed to try SCSI commands. |
1636 | * It'd be nice if we supported eject, for example, but we don't. | ||
1637 | */ | ||
1432 | if (out->type & VIRTIO_BLK_T_SCSI_CMD) { | 1638 | if (out->type & VIRTIO_BLK_T_SCSI_CMD) { |
1433 | fprintf(stderr, "Scsi commands unsupported\n"); | 1639 | fprintf(stderr, "Scsi commands unsupported\n"); |
1434 | *in = VIRTIO_BLK_S_UNSUPP; | 1640 | *in = VIRTIO_BLK_S_UNSUPP; |
1435 | wlen = sizeof(*in); | 1641 | wlen = sizeof(*in); |
1436 | } else if (out->type & VIRTIO_BLK_T_OUT) { | 1642 | } else if (out->type & VIRTIO_BLK_T_OUT) { |
1437 | /* Write */ | 1643 | /* |
1438 | 1644 | * Write | |
1439 | /* Move to the right location in the block file. This can fail | 1645 | * |
1440 | * if they try to write past end. */ | 1646 | * Move to the right location in the block file. This can fail |
1647 | * if they try to write past end. | ||
1648 | */ | ||
1441 | if (lseek64(vblk->fd, off, SEEK_SET) != off) | 1649 | if (lseek64(vblk->fd, off, SEEK_SET) != off) |
1442 | err(1, "Bad seek to sector %llu", out->sector); | 1650 | err(1, "Bad seek to sector %llu", out->sector); |
1443 | 1651 | ||
1444 | ret = writev(vblk->fd, iov+1, out_num-1); | 1652 | ret = writev(vblk->fd, iov+1, out_num-1); |
1445 | verbose("WRITE to sector %llu: %i\n", out->sector, ret); | 1653 | verbose("WRITE to sector %llu: %i\n", out->sector, ret); |
1446 | 1654 | ||
1447 | /* Grr... Now we know how long the descriptor they sent was, we | 1655 | /* |
1656 | * Grr... Now we know how long the descriptor they sent was, we | ||
1448 | * make sure they didn't try to write over the end of the block | 1657 | * make sure they didn't try to write over the end of the block |
1449 | * file (possibly extending it). */ | 1658 | * file (possibly extending it). |
1659 | */ | ||
1450 | if (ret > 0 && off + ret > vblk->len) { | 1660 | if (ret > 0 && off + ret > vblk->len) { |
1451 | /* Trim it back to the correct length */ | 1661 | /* Trim it back to the correct length */ |
1452 | ftruncate64(vblk->fd, vblk->len); | 1662 | ftruncate64(vblk->fd, vblk->len); |
@@ -1456,10 +1666,12 @@ static void blk_request(struct virtqueue *vq) | |||
1456 | wlen = sizeof(*in); | 1666 | wlen = sizeof(*in); |
1457 | *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); | 1667 | *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); |
1458 | } else { | 1668 | } else { |
1459 | /* Read */ | 1669 | /* |
1460 | 1670 | * Read | |
1461 | /* Move to the right location in the block file. This can fail | 1671 | * |
1462 | * if they try to read past end. */ | 1672 | * Move to the right location in the block file. This can fail |
1673 | * if they try to read past end. | ||
1674 | */ | ||
1463 | if (lseek64(vblk->fd, off, SEEK_SET) != off) | 1675 | if (lseek64(vblk->fd, off, SEEK_SET) != off) |
1464 | err(1, "Bad seek to sector %llu", out->sector); | 1676 | err(1, "Bad seek to sector %llu", out->sector); |
1465 | 1677 | ||
@@ -1474,13 +1686,16 @@ static void blk_request(struct virtqueue *vq) | |||
1474 | } | 1686 | } |
1475 | } | 1687 | } |
1476 | 1688 | ||
1477 | /* OK, so we noted that it was pretty poor to use an fdatasync as a | 1689 | /* |
1690 | * OK, so we noted that it was pretty poor to use an fdatasync as a | ||
1478 | * barrier. But Christoph Hellwig points out that we need a sync | 1691 | * barrier. But Christoph Hellwig points out that we need a sync |
1479 | * *afterwards* as well: "Barriers specify no reordering to the front | 1692 | * *afterwards* as well: "Barriers specify no reordering to the front |
1480 | * or the back." And Jens Axboe confirmed it, so here we are: */ | 1693 | * or the back." And Jens Axboe confirmed it, so here we are: |
1694 | */ | ||
1481 | if (out->type & VIRTIO_BLK_T_BARRIER) | 1695 | if (out->type & VIRTIO_BLK_T_BARRIER) |
1482 | fdatasync(vblk->fd); | 1696 | fdatasync(vblk->fd); |
1483 | 1697 | ||
1698 | /* Finished that request. */ | ||
1484 | add_used(vq, head, wlen); | 1699 | add_used(vq, head, wlen); |
1485 | } | 1700 | } |
1486 | 1701 | ||
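In ring terms, the request blk_request() just parsed looks like this; the header struct is the one from linux/virtio_blk.h, and the layout is implied by the convert() calls above:

    /*
     *   iov[0]                struct virtio_blk_outhdr (type, ioprio, sector)
     *   iov[1 .. out_num-1]   data to write            (VIRTIO_BLK_T_OUT)
     *   iov[out_num .. n-2]   buffers to read into     (otherwise)
     *   iov[n-1]              u8 status                (VIRTIO_BLK_S_OK etc.)
     */
    struct virtio_blk_outhdr {
            __u32 type;             /* VIRTIO_BLK_T_* */
            __u32 ioprio;
            __u64 sector;           /* in 512-byte sectors */
    };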
@@ -1491,7 +1706,7 @@ static void setup_block_file(const char *filename) | |||
1491 | struct vblk_info *vblk; | 1706 | struct vblk_info *vblk; |
1492 | struct virtio_blk_config conf; | 1707 | struct virtio_blk_config conf; |
1493 | 1708 | ||
1494 | /* The device responds to return from I/O thread. */ | 1709 | /* Create the device. */ |
1495 | dev = new_device("block", VIRTIO_ID_BLOCK); | 1710 | dev = new_device("block", VIRTIO_ID_BLOCK); |
1496 | 1711 | ||
1497 | /* The device has one virtqueue, where the Guest places requests. */ | 1712 | /* The device has one virtqueue, where the Guest places requests. */ |
@@ -1510,27 +1725,32 @@ static void setup_block_file(const char *filename) | |||
1510 | /* Tell Guest how many sectors this device has. */ | 1725 | /* Tell Guest how many sectors this device has. */ |
1511 | conf.capacity = cpu_to_le64(vblk->len / 512); | 1726 | conf.capacity = cpu_to_le64(vblk->len / 512); |
1512 | 1727 | ||
1513 | /* Tell Guest not to put in too many descriptors at once: two are used | 1728 | /* |
1514 | * for the in and out elements. */ | 1729 | * Tell Guest not to put in too many descriptors at once: two are used |
1730 | * for the in and out elements. | ||
1731 | */ | ||
1515 | add_feature(dev, VIRTIO_BLK_F_SEG_MAX); | 1732 | add_feature(dev, VIRTIO_BLK_F_SEG_MAX); |
1516 | conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); | 1733 | conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); |
1517 | 1734 | ||
1518 | set_config(dev, sizeof(conf), &conf); | 1735 | /* Don't try to put the whole struct: we have an 8-bit limit. */ |
1736 | set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf); | ||
1519 | 1737 | ||
1520 | verbose("device %u: virtblock %llu sectors\n", | 1738 | verbose("device %u: virtblock %llu sectors\n", |
1521 | ++devices.device_num, le64_to_cpu(conf.capacity)); | 1739 | ++devices.device_num, le64_to_cpu(conf.capacity)); |
1522 | } | 1740 | } |
1523 | 1741 | ||
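One more line on the offsetof() trick: sizeof(struct virtio_blk_config) can exceed what a u8 length can describe, so we copy only the leading fields the Guest actually needs. Roughly:

    /*
     * Copies capacity, size_max and seg_max; stops before 'geometry'
     * and everything after it, so the length always fits in a u8.
     */
    set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf);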
1524 | struct rng_info { | 1742 | /*L:211 |
1525 | int rfd; | 1743 | * Our random number generator device reads from /dev/random into the Guest's |
1526 | }; | ||
1527 | |||
1528 | /* Our random number generator device reads from /dev/random into the Guest's | ||
1529 | * input buffers. The usual case is that the Guest doesn't want random numbers | 1744 | * input buffers. The usual case is that the Guest doesn't want random numbers |
1530 | * and so has no buffers although /dev/random is still readable, whereas | 1745 | * and so has no buffers although /dev/random is still readable, whereas |
1531 | * console is the reverse. | 1746 | * console is the reverse. |
1532 | * | 1747 | * |
1533 | * The same logic applies, however. */ | 1748 | * The same logic applies, however. |
1749 | */ | ||
1750 | struct rng_info { | ||
1751 | int rfd; | ||
1752 | }; | ||
1753 | |||
1534 | static void rng_input(struct virtqueue *vq) | 1754 | static void rng_input(struct virtqueue *vq) |
1535 | { | 1755 | { |
1536 | int len; | 1756 | int len; |
@@ -1543,9 +1763,10 @@ static void rng_input(struct virtqueue *vq) | |||
1543 | if (out_num) | 1763 | if (out_num) |
1544 | errx(1, "Output buffers in rng?"); | 1764 | errx(1, "Output buffers in rng?"); |
1545 | 1765 | ||
1546 | /* This is why we convert to iovecs: the readv() call uses them, and so | 1766 | /* |
1547 | * it reads straight into the Guest's buffer. We loop to make sure we | 1767 | * Just like the console write, we loop to cover the whole iovec. |
1548 | * fill it. */ | 1768 | * In this case, short reads actually happen quite a bit. |
1769 | */ | ||
1549 | while (!iov_empty(iov, in_num)) { | 1770 | while (!iov_empty(iov, in_num)) { |
1550 | len = readv(rng_info->rfd, iov, in_num); | 1771 | len = readv(rng_info->rfd, iov, in_num); |
1551 | if (len <= 0) | 1772 | if (len <= 0) |
@@ -1558,15 +1779,18 @@ static void rng_input(struct virtqueue *vq) | |||
1558 | add_used(vq, head, totlen); | 1779 | add_used(vq, head, totlen); |
1559 | } | 1780 | } |
1560 | 1781 | ||
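One subtlety in that loop: after a short readv(), the front of the iovec must be advanced past the bytes already filled, or the next read would overwrite them. That step is elided by the hunk; a hypothetical equivalent (iov_advance() is illustrative, not the launcher's name):

    static void iov_advance(struct iovec iov[], unsigned num, size_t len)
    {
            unsigned i;

            for (i = 0; i < num && len; i++) {
                    size_t used = len < iov[i].iov_len ? len : iov[i].iov_len;

                    iov[i].iov_base = (char *)iov[i].iov_base + used;
                    iov[i].iov_len -= used;
                    len -= used;
            }
    }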
1561 | /* And this creates a "hardware" random number device for the Guest. */ | 1782 | /*L:199 |
1783 | * This creates a "hardware" random number device for the Guest. | ||
1784 | */ | ||
1562 | static void setup_rng(void) | 1785 | static void setup_rng(void) |
1563 | { | 1786 | { |
1564 | struct device *dev; | 1787 | struct device *dev; |
1565 | struct rng_info *rng_info = malloc(sizeof(*rng_info)); | 1788 | struct rng_info *rng_info = malloc(sizeof(*rng_info)); |
1566 | 1789 | ||
1790 | /* Our device's private info simply contains the /dev/random fd. */ | ||
1567 | rng_info->rfd = open_or_die("/dev/random", O_RDONLY); | 1791 | rng_info->rfd = open_or_die("/dev/random", O_RDONLY); |
1568 | 1792 | ||
1569 | /* The device responds to return from I/O thread. */ | 1793 | /* Create the new device. */ |
1570 | dev = new_device("rng", VIRTIO_ID_RNG); | 1794 | dev = new_device("rng", VIRTIO_ID_RNG); |
1571 | dev->priv = rng_info; | 1795 | dev->priv = rng_info; |
1572 | 1796 | ||
@@ -1582,8 +1806,10 @@ static void __attribute__((noreturn)) restart_guest(void) | |||
1582 | { | 1806 | { |
1583 | unsigned int i; | 1807 | unsigned int i; |
1584 | 1808 | ||
1585 | /* Since we don't track all open fds, we simply close everything beyond | 1809 | /* |
1586 | * stderr. */ | 1810 | * Since we don't track all open fds, we simply close everything beyond |
1811 | * stderr. | ||
1812 | */ | ||
1587 | for (i = 3; i < FD_SETSIZE; i++) | 1813 | for (i = 3; i < FD_SETSIZE; i++) |
1588 | close(i); | 1814 | close(i); |
1589 | 1815 | ||
@@ -1594,8 +1820,10 @@ static void __attribute__((noreturn)) restart_guest(void) | |||
1594 | err(1, "Could not exec %s", main_args[0]); | 1820 | err(1, "Could not exec %s", main_args[0]); |
1595 | } | 1821 | } |
1596 | 1822 | ||
1597 | /*L:220 Finally we reach the core of the Launcher which runs the Guest, serves | 1823 | /*L:220 |
1598 | * its input and output, and finally, lays it to rest. */ | 1824 | * Finally we reach the core of the Launcher which runs the Guest, serves |
1825 | * its input and output, and finally, lays it to rest. | ||
1826 | */ | ||
1599 | static void __attribute__((noreturn)) run_guest(void) | 1827 | static void __attribute__((noreturn)) run_guest(void) |
1600 | { | 1828 | { |
1601 | for (;;) { | 1829 | for (;;) { |
@@ -1630,7 +1858,7 @@ static void __attribute__((noreturn)) run_guest(void) | |||
1630 | * | 1858 | * |
1631 | * Are you ready? Take a deep breath and join me in the core of the Host, in | 1859 | * Are you ready? Take a deep breath and join me in the core of the Host, in |
1632 | * "make Host". | 1860 | * "make Host". |
1633 | :*/ | 1861 | :*/ |
1634 | 1862 | ||
1635 | static struct option opts[] = { | 1863 | static struct option opts[] = { |
1636 | { "verbose", 0, NULL, 'v' }, | 1864 | { "verbose", 0, NULL, 'v' }, |
@@ -1651,8 +1879,7 @@ static void usage(void) | |||
1651 | /*L:105 The main routine is where the real work begins: */ | 1879 | /*L:105 The main routine is where the real work begins: */ |
1652 | int main(int argc, char *argv[]) | 1880 | int main(int argc, char *argv[]) |
1653 | { | 1881 | { |
1654 | /* Memory, top-level pagetable, code startpoint and size of the | 1882 | /* Memory, code startpoint and size of the (optional) initrd. */ |
1655 | * (optional) initrd. */ | ||
1656 | unsigned long mem = 0, start, initrd_size = 0; | 1883 | unsigned long mem = 0, start, initrd_size = 0; |
1657 | /* Two temporaries. */ | 1884 | /* Two temporaries. */ |
1658 | int i, c; | 1885 | int i, c; |
@@ -1664,24 +1891,32 @@ int main(int argc, char *argv[]) | |||
1664 | /* Save the args: we "reboot" by execing ourselves again. */ | 1891 | /* Save the args: we "reboot" by execing ourselves again. */ |
1665 | main_args = argv; | 1892 | main_args = argv; |
1666 | 1893 | ||
1667 | /* First we initialize the device list. We keep a pointer to the last | 1894 | /* |
1895 | * First we initialize the device list. We keep a pointer to the last | ||
1668 | * device, and the next interrupt number to use for devices (1: | 1896 | * device, and the next interrupt number to use for devices (1: |
1669 | * remember that 0 is used by the timer). */ | 1897 | * remember that 0 is used by the timer). |
1898 | */ | ||
1670 | devices.lastdev = NULL; | 1899 | devices.lastdev = NULL; |
1671 | devices.next_irq = 1; | 1900 | devices.next_irq = 1; |
1672 | 1901 | ||
1902 | /* We're CPU 0. In fact, that's the only CPU possible right now. */ | ||
1673 | cpu_id = 0; | 1903 | cpu_id = 0; |
1674 | /* We need to know how much memory so we can set up the device | 1904 | |
1905 | /* | ||
1906 | * We need to know how much memory so we can set up the device | ||
1675 | * descriptor and memory pages for the devices as we parse the command | 1907 | * descriptor and memory pages for the devices as we parse the command |
1676 | * line. So we quickly look through the arguments to find the amount | 1908 | * line. So we quickly look through the arguments to find the amount |
1677 | * of memory now. */ | 1909 | * of memory now. |
1910 | */ | ||
1678 | for (i = 1; i < argc; i++) { | 1911 | for (i = 1; i < argc; i++) { |
1679 | if (argv[i][0] != '-') { | 1912 | if (argv[i][0] != '-') { |
1680 | mem = atoi(argv[i]) * 1024 * 1024; | 1913 | mem = atoi(argv[i]) * 1024 * 1024; |
1681 | /* We start by mapping anonymous pages over all of | 1914 | /* |
1915 | * We start by mapping anonymous pages over all of | ||
1682 | * guest-physical memory range. This fills it with 0, | 1916 | * guest-physical memory range. This fills it with 0, |
1683 | * and ensures that the Guest won't be killed when it | 1917 | * and ensures that the Guest won't be killed when it |
1684 | * tries to access it. */ | 1918 | * tries to access it. |
1919 | */ | ||
1685 | guest_base = map_zeroed_pages(mem / getpagesize() | 1920 | guest_base = map_zeroed_pages(mem / getpagesize() |
1686 | + DEVICE_PAGES); | 1921 | + DEVICE_PAGES); |
1687 | guest_limit = mem; | 1922 | guest_limit = mem; |
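map_zeroed_pages() itself is outside this hunk; the usual way to get demand-zeroed, private pages is a single anonymous mmap(). A sketch (the launcher's real helper may map /dev/zero instead, to the same effect):

    #include <err.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *map_zeroed_pages(unsigned int num)
    {
            void *addr = mmap(NULL, (size_t)getpagesize() * num,
                              PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (addr == MAP_FAILED)
                    err(1, "Mmapping %u zeroed pages", num);
            return addr;
    }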
@@ -1714,8 +1949,10 @@ int main(int argc, char *argv[]) | |||
1714 | usage(); | 1949 | usage(); |
1715 | } | 1950 | } |
1716 | } | 1951 | } |
1717 | /* After the other arguments we expect memory and kernel image name, | 1952 | /* |
1718 | * followed by command line arguments for the kernel. */ | 1953 | * After the other arguments we expect memory and kernel image name, |
1954 | * followed by command line arguments for the kernel. | ||
1955 | */ | ||
1719 | if (optind + 2 > argc) | 1956 | if (optind + 2 > argc) |
1720 | usage(); | 1957 | usage(); |
1721 | 1958 | ||
@@ -1733,20 +1970,26 @@ int main(int argc, char *argv[]) | |||
1733 | /* Map the initrd image if requested (at top of physical memory) */ | 1970 | /* Map the initrd image if requested (at top of physical memory) */ |
1734 | if (initrd_name) { | 1971 | if (initrd_name) { |
1735 | initrd_size = load_initrd(initrd_name, mem); | 1972 | initrd_size = load_initrd(initrd_name, mem); |
1736 | /* These are the location in the Linux boot header where the | 1973 | /* |
1737 | * start and size of the initrd are expected to be found. */ | 1974 | * These are the location in the Linux boot header where the |
1975 | * start and size of the initrd are expected to be found. | ||
1976 | */ | ||
1738 | boot->hdr.ramdisk_image = mem - initrd_size; | 1977 | boot->hdr.ramdisk_image = mem - initrd_size; |
1739 | boot->hdr.ramdisk_size = initrd_size; | 1978 | boot->hdr.ramdisk_size = initrd_size; |
1740 | /* The bootloader type 0xFF means "unknown"; that's OK. */ | 1979 | /* The bootloader type 0xFF means "unknown"; that's OK. */ |
1741 | boot->hdr.type_of_loader = 0xFF; | 1980 | boot->hdr.type_of_loader = 0xFF; |
1742 | } | 1981 | } |
1743 | 1982 | ||
1744 | /* The Linux boot header contains an "E820" memory map: ours is a | 1983 | /* |
1745 | * simple, single region. */ | 1984 | * The Linux boot header contains an "E820" memory map: ours is a |
1985 | * simple, single region. | ||
1986 | */ | ||
1746 | boot->e820_entries = 1; | 1987 | boot->e820_entries = 1; |
1747 | boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); | 1988 | boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); |
1748 | /* The boot header contains a command line pointer: we put the command | 1989 | /* |
1749 | * line after the boot header. */ | 1990 | * The boot header contains a command line pointer: we put the command |
1991 | * line after the boot header. | ||
1992 | */ | ||
1750 | boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); | 1993 | boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); |
1751 | /* We use a simple helper to copy the arguments separated by spaces. */ | 1994 | /* We use a simple helper to copy the arguments separated by spaces. */ |
1752 | concat((char *)(boot + 1), argv+optind+2); | 1995 | concat((char *)(boot + 1), argv+optind+2); |
@@ -1760,11 +2003,13 @@ int main(int argc, char *argv[]) | |||
1760 | /* Tell the entry path not to try to reload segment registers. */ | 2003 | /* Tell the entry path not to try to reload segment registers. */ |
1761 | boot->hdr.loadflags |= KEEP_SEGMENTS; | 2004 | boot->hdr.loadflags |= KEEP_SEGMENTS; |
1762 | 2005 | ||
1763 | /* We tell the kernel to initialize the Guest: this returns the open | 2006 | /* |
1764 | * /dev/lguest file descriptor. */ | 2007 | * We tell the kernel to initialize the Guest: this returns the open |
2008 | * /dev/lguest file descriptor. | ||
2009 | */ | ||
1765 | tell_kernel(start); | 2010 | tell_kernel(start); |
1766 | 2011 | ||
1767 | /* Ensure that we terminate if a child dies. */ | 2012 | /* Ensure that we terminate if a device-servicing child dies. */ |
1768 | signal(SIGCHLD, kill_launcher); | 2013 | signal(SIGCHLD, kill_launcher); |
1769 | 2014 | ||
1770 | /* If we exit via err(), this kills all the threads, restores tty. */ | 2015 | /* If we exit via err(), this kills all the threads, restores tty. */ |
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h index 313389cd50d2..5136dad57cbb 100644 --- a/arch/x86/include/asm/lguest.h +++ b/arch/x86/include/asm/lguest.h | |||
@@ -17,8 +17,7 @@ | |||
17 | /* Pages for switcher itself, then two pages per cpu */ | 17 | /* Pages for switcher itself, then two pages per cpu */ |
18 | #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids) | 18 | #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids) |
19 | 19 | ||
20 | /* We map at -4M (-2M when PAE is activated) for ease of mapping | 20 | /* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */ |
21 | * into the guest (one PTE page). */ | ||
22 | #ifdef CONFIG_X86_PAE | 21 | #ifdef CONFIG_X86_PAE |
23 | #define SWITCHER_ADDR 0xFFE00000 | 22 | #define SWITCHER_ADDR 0xFFE00000 |
24 | #else | 23 | #else |
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index 33600a66755f..ba0eed8aa1a6 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h | |||
@@ -30,27 +30,27 @@ | |||
30 | #include <asm/hw_irq.h> | 30 | #include <asm/hw_irq.h> |
31 | #include <asm/kvm_para.h> | 31 | #include <asm/kvm_para.h> |
32 | 32 | ||
33 | /*G:030 But first, how does our Guest contact the Host to ask for privileged | 33 | /*G:030 |
34 | * But first, how does our Guest contact the Host to ask for privileged | ||
34 | * operations? There are two ways: the direct way is to make a "hypercall", | 35 | * operations? There are two ways: the direct way is to make a "hypercall", |
35 | * to make requests of the Host Itself. | 36 | * to make requests of the Host Itself. |
36 | * | 37 | * |
37 | * We use the KVM hypercall mechanism. Seventeen hypercalls are | 38 | * We use the KVM hypercall mechanism, though completely different hypercall |
38 | * available: the hypercall number is put in the %eax register, and the | 39 | * numbers. Seventeen hypercalls are available: the hypercall number is put in |
39 | * arguments (when required) are placed in %ebx, %ecx, %edx and %esi. | 40 | * the %eax register, and the arguments (when required) are placed in %ebx, |
40 | * If a return value makes sense, it's returned in %eax. | 41 | * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax. |
41 | * | 42 | * |
42 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful | 43 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful |
43 | * Host, rather than returning failure. This reflects Winston Churchill's | 44 | * Host, rather than returning failure. This reflects Winston Churchill's |
44 | * definition of a gentleman: "someone who is only rude intentionally". */ | 45 | * definition of a gentleman: "someone who is only rude intentionally". |
45 | /*:*/ | 46 | :*/ |
46 | 47 | ||
47 | /* Can't use our min() macro here: needs to be a constant */ | 48 | /* Can't use our min() macro here: needs to be a constant */ |
48 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) | 49 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) |
49 | 50 | ||
50 | #define LHCALL_RING_SIZE 64 | 51 | #define LHCALL_RING_SIZE 64 |
51 | struct hcall_args { | 52 | struct hcall_args { |
52 | /* These map directly onto eax, ebx, ecx, edx and esi | 53 | /* These map directly onto eax/ebx/ecx/edx/esi in struct lguest_regs */ |
53 | * in struct lguest_regs */ | ||
54 | unsigned long arg0, arg1, arg2, arg3, arg4; | 54 | unsigned long arg0, arg1, arg2, arg3, arg4; |
55 | }; | 55 | }; |
56 | 56 | ||
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index f2bf1f73d468..d677fa9ca650 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -22,7 +22,8 @@ | |||
22 | * | 22 | * |
23 | * So how does the kernel know it's a Guest? We'll see that later, but let's | 23 | * So how does the kernel know it's a Guest? We'll see that later, but let's |
24 | * just say that we end up here where we replace the native functions in various | 24 | * just say that we end up here where we replace the native functions in various |
25 | * "paravirt" structures with our Guest versions, then boot like normal. :*/ | 25 | * "paravirt" structures with our Guest versions, then boot like normal. |
26 | :*/ | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation. | 29 | * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation. |
@@ -74,7 +75,8 @@ | |||
74 | * | 75 | * |
75 | * The Guest in our tale is a simple creature: identical to the Host but | 76 | * The Guest in our tale is a simple creature: identical to the Host but |
76 | * behaving in simplified but equivalent ways. In particular, the Guest is the | 77 | * behaving in simplified but equivalent ways. In particular, the Guest is the |
77 | * same kernel as the Host (or at least, built from the same source code). :*/ | 78 | * same kernel as the Host (or at least, built from the same source code). |
79 | :*/ | ||
78 | 80 | ||
79 | struct lguest_data lguest_data = { | 81 | struct lguest_data lguest_data = { |
80 | .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF }, | 82 | .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF }, |
@@ -85,7 +87,8 @@ struct lguest_data lguest_data = { | |||
85 | .syscall_vec = SYSCALL_VECTOR, | 87 | .syscall_vec = SYSCALL_VECTOR, |
86 | }; | 88 | }; |
87 | 89 | ||
88 | /*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a | 90 | /*G:037 |
91 | * async_hcall() is pretty simple: I'm quite proud of it really. We have a | ||
89 | * ring buffer of stored hypercalls which the Host will run through next time we | 92 | * ring buffer of stored hypercalls which the Host will run through next time we |
90 | * do a normal hypercall. Each entry in the ring has 5 slots for the hypercall | 93 | * do a normal hypercall. Each entry in the ring has 5 slots for the hypercall |
91 | * arguments, and a "hcall_status" word which is 0 if the call is ready to go, | 94 | * arguments, and a "hcall_status" word which is 0 if the call is ready to go, |
@@ -94,7 +97,8 @@ struct lguest_data lguest_data = { | |||
94 | * If we come around to a slot which hasn't been finished, then the table is | 97 | * If we come around to a slot which hasn't been finished, then the table is |
95 | * full and we just make the hypercall directly. This has the nice side | 98 | * full and we just make the hypercall directly. This has the nice side |
96 | * effect of causing the Host to run all the stored calls in the ring buffer | 99 | * effect of causing the Host to run all the stored calls in the ring buffer |
97 | * which empties it for next time! */ | 100 | * which empties it for next time! |
101 | */ | ||
98 | static void async_hcall(unsigned long call, unsigned long arg1, | 102 | static void async_hcall(unsigned long call, unsigned long arg1, |
99 | unsigned long arg2, unsigned long arg3, | 103 | unsigned long arg2, unsigned long arg3, |
100 | unsigned long arg4) | 104 | unsigned long arg4) |
@@ -103,9 +107,11 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
103 | static unsigned int next_call; | 107 | static unsigned int next_call; |
104 | unsigned long flags; | 108 | unsigned long flags; |
105 | 109 | ||
106 | /* Disable interrupts if not already disabled: we don't want an | 110 | /* |
111 | * Disable interrupts if not already disabled: we don't want an | ||
107 | * interrupt handler making a hypercall while we're already doing | 112 | * interrupt handler making a hypercall while we're already doing |
108 | * one! */ | 113 | * one! |
114 | */ | ||
109 | local_irq_save(flags); | 115 | local_irq_save(flags); |
110 | if (lguest_data.hcall_status[next_call] != 0xFF) { | 116 | if (lguest_data.hcall_status[next_call] != 0xFF) { |
111 | /* Table full, so do normal hcall which will flush table. */ | 117 | /* Table full, so do normal hcall which will flush table. */ |
@@ -125,8 +131,9 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
125 | local_irq_restore(flags); | 131 | local_irq_restore(flags); |
126 | } | 132 | } |
127 | 133 | ||
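The branch elided between these hunks stores the arguments and only then flips hcall_status, so the Host can never see a half-written entry. A plausible reconstruction (field names per struct lguest_data and struct hcall_args):

    /* Slot free: store the call for the Host to consume later. */
    lguest_data.hcalls[next_call].arg0 = call;
    lguest_data.hcalls[next_call].arg1 = arg1;
    lguest_data.hcalls[next_call].arg2 = arg2;
    lguest_data.hcalls[next_call].arg3 = arg3;
    lguest_data.hcalls[next_call].arg4 = arg4;
    /* Publish the arguments before marking the slot ready to run. */
    wmb();
    lguest_data.hcall_status[next_call] = 0;
    if (++next_call == LHCALL_RING_SIZE)
            next_call = 0;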
128 | /*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first | 134 | /*G:035 |
129 | * real optimization trick! | 135 | * Notice the lazy_hcall() above, rather than hcall(). This is our first real |
136 | * optimization trick! | ||
130 | * | 137 | * |
131 | * When lazy_mode is set, it means we're allowed to defer all hypercalls and do | 138 | * When lazy_mode is set, it means we're allowed to defer all hypercalls and do |
132 | * them as a batch when lazy_mode is eventually turned off. Because hypercalls | 139 | * them as a batch when lazy_mode is eventually turned off. Because hypercalls |
@@ -136,7 +143,8 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
136 | * lguest_leave_lazy_mode(). | 143 | * lguest_leave_lazy_mode(). |
137 | * | 144 | * |
138 | * So, when we're in lazy mode, we call async_hcall() to store the call for | 145 | * So, when we're in lazy mode, we call async_hcall() to store the call for |
139 | * future processing: */ | 146 | * future processing: |
147 | */ | ||
140 | static void lazy_hcall1(unsigned long call, | 148 | static void lazy_hcall1(unsigned long call, |
141 | unsigned long arg1) | 149 | unsigned long arg1) |
142 | { | 150 | { |
@@ -146,6 +154,7 @@ static void lazy_hcall1(unsigned long call, | |||
146 | async_hcall(call, arg1, 0, 0, 0); | 154 | async_hcall(call, arg1, 0, 0, 0); |
147 | } | 155 | } |
148 | 156 | ||
157 | /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ | ||
149 | static void lazy_hcall2(unsigned long call, | 158 | static void lazy_hcall2(unsigned long call, |
150 | unsigned long arg1, | 159 | unsigned long arg1, |
151 | unsigned long arg2) | 160 | unsigned long arg2) |
@@ -181,8 +190,10 @@ static void lazy_hcall4(unsigned long call, | |||
181 | } | 190 | } |
182 | #endif | 191 | #endif |
183 | 192 | ||
184 | /* When lazy mode is turned off, reset the per-cpu lazy mode variable and then | 193 | /*G:036 |
185 | * issue the do-nothing hypercall to flush any stored calls. */ | 194 | * When lazy mode is turned off, reset the per-cpu lazy mode variable and then |
195 | * issue the do-nothing hypercall to flush any stored calls. | ||
196 | :*/ | ||
186 | static void lguest_leave_lazy_mmu_mode(void) | 197 | static void lguest_leave_lazy_mmu_mode(void) |
187 | { | 198 | { |
188 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); | 199 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); |
@@ -208,9 +219,11 @@ static void lguest_end_context_switch(struct task_struct *next) | |||
208 | * check there before it tries to deliver an interrupt. | 219 | * check there before it tries to deliver an interrupt. |
209 | */ | 220 | */ |
210 | 221 | ||
211 | /* save_flags() is expected to return the processor state (ie. "flags"). The | 222 | /* |
223 | * save_flags() is expected to return the processor state (ie. "flags"). The | ||
212 | * flags word contains all kinds of stuff, but in practice Linux only cares | 224 | * flags word contains all kinds of stuff, but in practice Linux only cares |
213 | * about the interrupt flag. Our "save_flags()" just returns that. */ | 225 | * about the interrupt flag. Our "save_flags()" just returns that. |
226 | */ | ||
214 | static unsigned long save_fl(void) | 227 | static unsigned long save_fl(void) |
215 | { | 228 | { |
216 | return lguest_data.irq_enabled; | 229 | return lguest_data.irq_enabled; |
@@ -222,13 +235,15 @@ static void irq_disable(void) | |||
222 | lguest_data.irq_enabled = 0; | 235 | lguest_data.irq_enabled = 0; |
223 | } | 236 | } |
224 | 237 | ||
225 | /* Let's pause a moment. Remember how I said these are called so often? | 238 | /* |
239 | * Let's pause a moment. Remember how I said these are called so often? | ||
226 | * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to | 240 | * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to |
227 | * break some rules. In particular, these functions are assumed to save their | 241 | * break some rules. In particular, these functions are assumed to save their |
228 | * own registers if they need to: normal C functions assume they can trash the | 242 | * own registers if they need to: normal C functions assume they can trash the |
229 | * eax register. To use normal C functions, we use | 243 | * eax register. To use normal C functions, we use |
230 | * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the | 244 | * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the |
231 | * C function, then restores it. */ | 245 | * C function, then restores it. |
246 | */ | ||
232 | PV_CALLEE_SAVE_REGS_THUNK(save_fl); | 247 | PV_CALLEE_SAVE_REGS_THUNK(save_fl); |
233 | PV_CALLEE_SAVE_REGS_THUNK(irq_disable); | 248 | PV_CALLEE_SAVE_REGS_THUNK(irq_disable); |
234 | /*:*/ | 249 | /*:*/ |
@@ -237,18 +252,18 @@ PV_CALLEE_SAVE_REGS_THUNK(irq_disable); | |||
237 | extern void lg_irq_enable(void); | 252 | extern void lg_irq_enable(void); |
238 | extern void lg_restore_fl(unsigned long flags); | 253 | extern void lg_restore_fl(unsigned long flags); |
239 | 254 | ||
240 | /*M:003 Note that we don't check for outstanding interrupts when we re-enable | 255 | /*M:003 |
241 | * them (or when we unmask an interrupt). This seems to work for the moment, | 256 | * We could be more efficient in our checking of outstanding interrupts, rather |
242 | * since interrupts are rare and we'll just get the interrupt on the next timer | 257 | * than using a branch. One way would be to put the "irq_enabled" field in a |
243 | * tick, but now we can run with CONFIG_NO_HZ, we should revisit this. One way | 258 | * page by itself, and have the Host write-protect it when an interrupt comes |
244 | * would be to put the "irq_enabled" field in a page by itself, and have the | 259 | * in when irqs are disabled. There will then be a page fault as soon as |
245 | * Host write-protect it when an interrupt comes in when irqs are disabled. | 260 | * interrupts are re-enabled. |
246 | * There will then be a page fault as soon as interrupts are re-enabled. | ||
247 | * | 261 | * |
248 | * A better method is to implement soft interrupt disable generally for x86: | 262 | * A better method is to implement soft interrupt disable generally for x86: |
249 | * instead of disabling interrupts, we set a flag. If an interrupt does come | 263 | * instead of disabling interrupts, we set a flag. If an interrupt does come |
250 | * in, we then disable them for real. This is uncommon, so we could simply use | 264 | * in, we then disable them for real. This is uncommon, so we could simply use |
251 | * a hypercall for interrupt control and not worry about efficiency. :*/ | 265 | * a hypercall for interrupt control and not worry about efficiency. |
266 | :*/ | ||
252 | 267 | ||
253 | /*G:034 | 268 | /*G:034 |
254 | * The Interrupt Descriptor Table (IDT). | 269 | * The Interrupt Descriptor Table (IDT). |
@@ -261,10 +276,12 @@ extern void lg_restore_fl(unsigned long flags); | |||
261 | static void lguest_write_idt_entry(gate_desc *dt, | 276 | static void lguest_write_idt_entry(gate_desc *dt, |
262 | int entrynum, const gate_desc *g) | 277 | int entrynum, const gate_desc *g) |
263 | { | 278 | { |
264 | /* The gate_desc structure is 8 bytes long: we hand it to the Host in | 279 | /* |
280 | * The gate_desc structure is 8 bytes long: we hand it to the Host in | ||
265 | * two 32-bit chunks. The whole 32-bit kernel used to hand descriptors | 281 | * two 32-bit chunks. The whole 32-bit kernel used to hand descriptors |
266 | * around like this; typesafety wasn't a big concern in Linux's early | 282 | * around like this; typesafety wasn't a big concern in Linux's early |
267 | * years. */ | 283 | * years. |
284 | */ | ||
268 | u32 *desc = (u32 *)g; | 285 | u32 *desc = (u32 *)g; |
269 | /* Keep the local copy up to date. */ | 286 | /* Keep the local copy up to date. */ |
270 | native_write_idt_entry(dt, entrynum, g); | 287 | native_write_idt_entry(dt, entrynum, g); |
@@ -272,9 +289,11 @@ static void lguest_write_idt_entry(gate_desc *dt, | |||
272 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); | 289 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); |
273 | } | 290 | } |
274 | 291 | ||
275 | /* Changing to a different IDT is very rare: we keep the IDT up-to-date every | 292 | /* |
293 | * Changing to a different IDT is very rare: we keep the IDT up-to-date every | ||
276 | * time it is written, so we can simply loop through all entries and tell the | 294 | * time it is written, so we can simply loop through all entries and tell the |
277 | * Host about them. */ | 295 | * Host about them. |
296 | */ | ||
278 | static void lguest_load_idt(const struct desc_ptr *desc) | 297 | static void lguest_load_idt(const struct desc_ptr *desc) |
279 | { | 298 | { |
280 | unsigned int i; | 299 | unsigned int i; |
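The loop body falls past the end of this hunk; given the comment and the 8-byte gate size, a plausible completion of the whole function:

    static void lguest_load_idt(const struct desc_ptr *desc)
    {
            unsigned int i;
            struct desc_struct *idt = (void *)desc->address;

            /* Each IDT entry is 8 bytes; size is (bytes - 1), as usual. */
            for (i = 0; i < (desc->size + 1) / 8; i++)
                    kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
    }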
@@ -305,9 +324,11 @@ static void lguest_load_gdt(const struct desc_ptr *desc) | |||
305 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); | 324 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); |
306 | } | 325 | } |
307 | 326 | ||
308 | /* For a single GDT entry which changes, we do the lazy thing: alter our GDT, | 327 | /* |
328 | * For a single GDT entry which changes, we do the lazy thing: alter our GDT, | ||
309 | * then tell the Host to reload the entire thing. This operation is so rare | 329 | * then tell the Host to reload the entire thing. This operation is so rare |
310 | * that this naive implementation is reasonable. */ | 330 | * that this naive implementation is reasonable. |
331 | */ | ||
311 | static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, | 332 | static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, |
312 | const void *desc, int type) | 333 | const void *desc, int type) |
313 | { | 334 | { |
@@ -317,29 +338,36 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, | |||
317 | dt[entrynum].a, dt[entrynum].b); | 338 | dt[entrynum].a, dt[entrynum].b); |
318 | } | 339 | } |
319 | 340 | ||
320 | /* OK, I lied. There are three "thread local storage" GDT entries which change | 341 | /* |
342 | * OK, I lied. There are three "thread local storage" GDT entries which change | ||
321 | * on every context switch (these three entries are how glibc implements | 343 | * on every context switch (these three entries are how glibc implements |
322 | * __thread variables). So we have a hypercall specifically for this case. */ | 344 | * __thread variables). So we have a hypercall specifically for this case. |
345 | */ | ||
323 | static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) | 346 | static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) |
324 | { | 347 | { |
325 | /* There's one problem which normal hardware doesn't have: the Host | 348 | /* |
349 | * There's one problem which normal hardware doesn't have: the Host | ||
326 | * can't handle us removing entries we're currently using. So we clear | 350 | * can't handle us removing entries we're currently using. So we clear |
327 | * the GS register here: if it's needed it'll be reloaded anyway. */ | 351 | * the GS register here: if it's needed it'll be reloaded anyway. |
352 | */ | ||
328 | lazy_load_gs(0); | 353 | lazy_load_gs(0); |
329 | lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu); | 354 | lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu); |
330 | } | 355 | } |
331 | 356 | ||
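For a feel of what those three GDT entries buy userspace, here is the glibc feature in question: a __thread variable, where each thread quietly gets its own copy (plain pthreads, nothing lguest-specific; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    /* On 32-bit x86, glibc reaches this through a segment register whose
     * base comes from one of those per-thread GDT entries. */
    static __thread int counter;

    static void *work(void *name)
    {
        for (int i = 0; i < 3; i++)
            counter++;
        printf("%s: counter=%d\n", (char *)name, counter);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, work, "thread-a");
        pthread_create(&b, NULL, work, "thread-b");
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }

Both threads print 3: same variable name, different storage, courtesy of those GDT slots.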
332 | /*G:038 That's enough excitement for now, back to ploughing through each of | 357 | /*G:038 |
333 | * the different pv_ops structures (we're about 1/3 of the way through). | 358 | * That's enough excitement for now, back to ploughing through each of the |
359 | * different pv_ops structures (we're about 1/3 of the way through). | ||
334 | * | 360 | * |
335 | * This is the Local Descriptor Table, another weird Intel thingy. Linux only | 361 | * This is the Local Descriptor Table, another weird Intel thingy. Linux only |
336 | * uses this for some strange applications like Wine. We don't do anything | 362 | * uses this for some strange applications like Wine. We don't do anything |
337 | * here, so they'll get an informative and friendly Segmentation Fault. */ | 363 | * here, so they'll get an informative and friendly Segmentation Fault. |
364 | */ | ||
338 | static void lguest_set_ldt(const void *addr, unsigned entries) | 365 | static void lguest_set_ldt(const void *addr, unsigned entries) |
339 | { | 366 | { |
340 | } | 367 | } |
341 | 368 | ||
342 | /* This loads a GDT entry into the "Task Register": that entry points to a | 369 | /* |
370 | * This loads a GDT entry into the "Task Register": that entry points to a | ||
343 | * structure called the Task State Segment. Some comments scattered though the | 371 | * structure called the Task State Segment. Some comments scattered through the |
344 | * kernel code indicate that this used for task switching in ages past, along | 372 | * kernel code indicate that this was used for task switching in ages past, along |
345 | * with blood sacrifice and astrology. | 373 | * with blood sacrifice and astrology. |
@@ -347,19 +375,21 @@ static void lguest_set_ldt(const void *addr, unsigned entries) | |||
347 | * Now there's nothing interesting in here that we don't get told elsewhere. | 375 | * Now there's nothing interesting in here that we don't get told elsewhere. |
348 | * But the native version uses the "ltr" instruction, which makes the Host | 376 | * But the native version uses the "ltr" instruction, which makes the Host |
349 | * complain to the Guest about a Segmentation Fault and it'll oops. So we | 377 | * complain to the Guest about a Segmentation Fault and it'll oops. So we |
350 | * override the native version with a do-nothing version. */ | 378 | * override the native version with a do-nothing version. |
379 | */ | ||
351 | static void lguest_load_tr_desc(void) | 380 | static void lguest_load_tr_desc(void) |
352 | { | 381 | { |
353 | } | 382 | } |
354 | 383 | ||
355 | /* The "cpuid" instruction is a way of querying both the CPU identity | 384 | /* |
385 | * The "cpuid" instruction is a way of querying both the CPU identity | ||
356 | * (manufacturer, model, etc) and its features. It was introduced before the | 386 | * (manufacturer, model, etc) and its features. It was introduced before the |
357 | * Pentium in 1993 and keeps getting extended by both Intel, AMD and others. | 387 | * Pentium in 1993 and keeps getting extended by both Intel, AMD and others. |
358 | * As you might imagine, after a decade and a half this treatment, it is now a | 388 | * As you might imagine, after a decade and a half of this treatment, it is now a |
359 | * giant ball of hair. Its entry in the current Intel manual runs to 28 pages. | 389 | * giant ball of hair. Its entry in the current Intel manual runs to 28 pages. |
360 | * | 390 | * |
361 | * This instruction even it has its own Wikipedia entry. The Wikipedia entry | 391 | * This instruction even has its own Wikipedia entry. The Wikipedia entry |
362 | * has been translated into 4 languages. I am not making this up! | 392 | * has been translated into 5 languages. I am not making this up! |
363 | * | 393 | * |
364 | * We could get funky here and identify ourselves as "GenuineLguest", but | 394 | * We could get funky here and identify ourselves as "GenuineLguest", but |
365 | * instead we just use the real "cpuid" instruction. Then I pretty much turned | 395 | * instead we just use the real "cpuid" instruction. Then I pretty much turned |
@@ -371,7 +401,8 @@ static void lguest_load_tr_desc(void) | |||
371 | * Replacing the cpuid so we can turn features off is great for the kernel, but | 401 | * Replacing the cpuid so we can turn features off is great for the kernel, but |
372 | * anyone (including userspace) can just use the raw "cpuid" instruction and | 402 | * anyone (including userspace) can just use the raw "cpuid" instruction and |
373 | * the Host won't even notice since it isn't privileged. So we try not to get | 403 | * the Host won't even notice since it isn't privileged. So we try not to get |
374 | * too worked up about it. */ | 404 | * too worked up about it. |
405 | */ | ||
375 | static void lguest_cpuid(unsigned int *ax, unsigned int *bx, | 406 | static void lguest_cpuid(unsigned int *ax, unsigned int *bx, |
376 | unsigned int *cx, unsigned int *dx) | 407 | unsigned int *cx, unsigned int *dx) |
377 | { | 408 | { |
@@ -379,43 +410,63 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, | |||
379 | 410 | ||
380 | native_cpuid(ax, bx, cx, dx); | 411 | native_cpuid(ax, bx, cx, dx); |
381 | switch (function) { | 412 | switch (function) { |
382 | case 0: /* ID and highest CPUID. Futureproof a little by sticking to | 413 | /* |
383 | * older ones. */ | 414 | * CPUID 0 gives the highest legal CPUID number (and the ID string). |
415 | * We futureproof our code a little by sticking to known CPUID values. | ||
416 | */ | ||
417 | case 0: | ||
384 | if (*ax > 5) | 418 | if (*ax > 5) |
385 | *ax = 5; | 419 | *ax = 5; |
386 | break; | 420 | break; |
387 | case 1: /* Basic feature request. */ | 421 | |
388 | /* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */ | 422 | /* |
423 | * CPUID 1 is a basic feature request. | ||
424 | * | ||
425 | * CX: we only allow kernel to see SSE3, CMPXCHG16B and SSSE3 | ||
426 | * DX: SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU and PAE. | ||
427 | */ | ||
428 | case 1: | ||
389 | *cx &= 0x00002201; | 429 | *cx &= 0x00002201; |
390 | /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU, PAE. */ | ||
391 | *dx &= 0x07808151; | 430 | *dx &= 0x07808151; |
392 | /* The Host can do a nice optimization if it knows that the | 431 | /* |
432 | * The Host can do a nice optimization if it knows that the | ||
393 | * kernel mappings (addresses above 0xC0000000 or whatever | 433 | * kernel mappings (addresses above 0xC0000000 or whatever |
394 | * PAGE_OFFSET is set to) haven't changed. But Linux calls | 434 | * PAGE_OFFSET is set to) haven't changed. But Linux calls |
395 | * flush_tlb_user() for both user and kernel mappings unless | 435 | * flush_tlb_user() for both user and kernel mappings unless |
396 | * the Page Global Enable (PGE) feature bit is set. */ | 436 | * the Page Global Enable (PGE) feature bit is set. |
437 | */ | ||
397 | *dx |= 0x00002000; | 438 | *dx |= 0x00002000; |
398 | /* We also lie, and say we're family id 5. 6 or greater | 439 | /* |
440 | * We also lie, and say we're family id 5. 6 or greater | ||
399 | * leads to a rdmsr in early_init_intel which we can't handle. | 441 | * leads to a rdmsr in early_init_intel which we can't handle. |
400 | * Family ID is returned as bits 8-12 in ax. */ | 442 | * Family ID is returned as bits 8-11 in ax. |
443 | */ | ||
401 | *ax &= 0xFFFFF0FF; | 444 | *ax &= 0xFFFFF0FF; |
402 | *ax |= 0x00000500; | 445 | *ax |= 0x00000500; |
403 | break; | 446 | break; |
447 | /* | ||
448 | * 0x80000000 returns the highest Extended Function, so we futureproof | ||
449 | * like we do above by limiting it to known fields. | ||
450 | */ | ||
404 | case 0x80000000: | 451 | case 0x80000000: |
405 | /* Futureproof this a little: if they ask how much extended | ||
406 | * processor information there is, limit it to known fields. */ | ||
407 | if (*ax > 0x80000008) | 452 | if (*ax > 0x80000008) |
408 | *ax = 0x80000008; | 453 | *ax = 0x80000008; |
409 | break; | 454 | break; |
455 | |||
456 | /* | ||
457 | * PAE systems can mark pages as non-executable. Linux calls this the | ||
458 | * NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced | ||
459 | * Virus Protection). We just turn it off here, since we don't | ||
460 | * support it. | ||
461 | */ | ||
410 | case 0x80000001: | 462 | case 0x80000001: |
411 | /* Here we should fix nx cap depending on host. */ | ||
412 | /* For this version of PAE, we just clear NX bit. */ | ||
413 | *dx &= ~(1 << 20); | 463 | *dx &= ~(1 << 20); |
414 | break; | 464 | break; |
415 | } | 465 | } |
416 | } | 466 | } |
417 | 467 | ||
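The masking itself is ordinary bit-twiddling. A hedged userspace sketch of the same idea, with the masks copied from the code above (assumes an x86 machine and GNU inline asm; the real thing happens inside the pv_ops hook, of course):

    #include <stdio.h>

    static void cpuid(unsigned int fn, unsigned int *ax, unsigned int *bx,
                      unsigned int *cx, unsigned int *dx)
    {
        asm volatile("cpuid"
                     : "=a"(*ax), "=b"(*bx), "=c"(*cx), "=d"(*dx)
                     : "a"(fn), "c"(0));
    }

    int main(void)
    {
        unsigned int ax, bx, cx, dx;

        cpuid(1, &ax, &bx, &cx, &dx);
        cx &= 0x00002201;   /* keep only SSE3, CMPXCHG16B, SSSE3 */
        dx &= 0x07808151;   /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU, PAE */
        dx |= 0x00002000;   /* pretend PGE is available */
        /* Claim family 5: clear the family field (bits 8-11), then set it. */
        ax = (ax & 0xFFFFF0FF) | 0x00000500;
        printf("ax=%#010x cx=%#010x dx=%#010x\n", ax, cx, dx);
        return 0;
    }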
418 | /* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4. | 468 | /* |
469 | * Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4. | ||
419 | * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother | 470 | * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother |
420 | * it. The Host needs to know when the Guest wants to change them, so we have | 471 | * it. The Host needs to know when the Guest wants to change them, so we have |
421 | * a whole series of functions like read_cr0() and write_cr0(). | 472 | * a whole series of functions like read_cr0() and write_cr0(). |
@@ -430,7 +481,8 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, | |||
430 | * name like "FPUTRAP bit" be a little less cryptic? | 481 | * name like "FPUTRAP bit" be a little less cryptic? |
431 | * | 482 | * |
432 | * We store cr0 locally because the Host never changes it. The Guest sometimes | 483 | * We store cr0 locally because the Host never changes it. The Guest sometimes |
433 | * wants to read it and we'd prefer not to bother the Host unnecessarily. */ | 484 | * wants to read it and we'd prefer not to bother the Host unnecessarily. |
485 | */ | ||
434 | static unsigned long current_cr0; | 486 | static unsigned long current_cr0; |
435 | static void lguest_write_cr0(unsigned long val) | 487 | static void lguest_write_cr0(unsigned long val) |
436 | { | 488 | { |
@@ -443,18 +495,22 @@ static unsigned long lguest_read_cr0(void) | |||
443 | return current_cr0; | 495 | return current_cr0; |
444 | } | 496 | } |
445 | 497 | ||
446 | /* Intel provided a special instruction to clear the TS bit for people too cool | 498 | /* |
499 | * Intel provided a special instruction to clear the TS bit for people too cool | ||
447 | * to use write_cr0() to do it. This "clts" instruction is faster, because all | 500 | * to use write_cr0() to do it. This "clts" instruction is faster, because all |
448 | * the vowels have been optimized out. */ | 501 | * the vowels have been optimized out. |
502 | */ | ||
449 | static void lguest_clts(void) | 503 | static void lguest_clts(void) |
450 | { | 504 | { |
451 | lazy_hcall1(LHCALL_TS, 0); | 505 | lazy_hcall1(LHCALL_TS, 0); |
452 | current_cr0 &= ~X86_CR0_TS; | 506 | current_cr0 &= ~X86_CR0_TS; |
453 | } | 507 | } |
454 | 508 | ||
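The shadow-copy pattern is worth a tiny sketch in plain C: keep the value locally, tell the "Host" only on writes, answer reads from the copy. Everything here is a stand-in (hcall() is a printf, not a real hypercall):

    #include <stdio.h>

    #define X86_CR0_TS 0x8UL    /* the Task Switched flag, bit 3 */

    static unsigned long shadow_cr0;

    static void hcall(unsigned long arg)
    {
        printf("hypercall: LHCALL_TS(%#lx)\n", arg);  /* stand-in */
    }

    static void write_cr0(unsigned long val)
    {
        shadow_cr0 = val;               /* keep the local copy up to date */
        hcall(val & X86_CR0_TS);
    }

    static unsigned long read_cr0(void)
    {
        return shadow_cr0;              /* no need to bother the Host */
    }

    static void clts(void)
    {
        hcall(0);
        shadow_cr0 &= ~X86_CR0_TS;      /* clear TS locally too */
    }

    int main(void)
    {
        write_cr0(read_cr0() | X86_CR0_TS);
        clts();
        printf("cr0=%#lx\n", read_cr0());
        return 0;
    }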
455 | /* cr2 is the virtual address of the last page fault, which the Guest only ever | 509 | /* |
510 | * cr2 is the virtual address of the last page fault, which the Guest only ever | ||
456 | * reads. The Host kindly writes this into our "struct lguest_data", so we | 511 | * reads. The Host kindly writes this into our "struct lguest_data", so we |
457 | * just read it out of there. */ | 512 | * just read it out of there. |
513 | */ | ||
458 | static unsigned long lguest_read_cr2(void) | 514 | static unsigned long lguest_read_cr2(void) |
459 | { | 515 | { |
460 | return lguest_data.cr2; | 516 | return lguest_data.cr2; |
@@ -463,10 +519,12 @@ static unsigned long lguest_read_cr2(void) | |||
463 | /* See lguest_set_pte() below. */ | 519 | /* See lguest_set_pte() below. */ |
464 | static bool cr3_changed = false; | 520 | static bool cr3_changed = false; |
465 | 521 | ||
466 | /* cr3 is the current toplevel pagetable page: the principle is the same as | 522 | /* |
523 | * cr3 is the current toplevel pagetable page: the principle is the same as | ||
467 | * cr0. Keep a local copy, and tell the Host when it changes. The only | 524 | * cr0. Keep a local copy, and tell the Host when it changes. The only |
468 | * difference is that our local copy is in lguest_data because the Host needs | 525 | * difference is that our local copy is in lguest_data because the Host needs |
469 | * to set it upon our initial hypercall. */ | 526 | * to set it upon our initial hypercall. |
527 | */ | ||
470 | static void lguest_write_cr3(unsigned long cr3) | 528 | static void lguest_write_cr3(unsigned long cr3) |
471 | { | 529 | { |
472 | lguest_data.pgdir = cr3; | 530 | lguest_data.pgdir = cr3; |
@@ -511,7 +569,7 @@ static void lguest_write_cr4(unsigned long val) | |||
511 | * cr3 ---> +---------+ | 569 | * cr3 ---> +---------+ |
512 | * | --------->+---------+ | 570 | * | --------->+---------+ |
513 | * | | | PADDR1 | | 571 | * | | | PADDR1 | |
514 | * Top-level | | PADDR2 | | 572 | * Mid-level | | PADDR2 | |
515 | * (PMD) page | | | | 573 | * (PMD) page | | | |
516 | * | | Lower-level | | 574 | * | | Lower-level | |
517 | * | | (PTE) page | | 575 | * | | (PTE) page | |
@@ -531,21 +589,62 @@ static void lguest_write_cr4(unsigned long val) | |||
531 | * Index into top Index into second Offset within page | 589 | * Index into top Index into second Offset within page |
532 | * page directory page pagetable page | 590 | * page directory page pagetable page |
533 | * | 591 | * |
534 | * The kernel spends a lot of time changing both the top-level page directory | 592 | * Now, unfortunately, this isn't the whole story: Intel added Physical Address |
535 | * and lower-level pagetable pages. The Guest doesn't know physical addresses, | 593 | * Extension (PAE) to allow 32 bit systems to use 64GB of memory (ie. 36 bits). |
536 | * so while it maintains these page tables exactly like normal, it also needs | 594 | * These are held in 64-bit page table entries, so we can now only fit 512 |
537 | * to keep the Host informed whenever it makes a change: the Host will create | 595 | * entries in a page, and the neat three-level tree breaks down. |
538 | * the real page tables based on the Guests'. | 596 | * |
597 | * The result is a four level page table: | ||
598 | * | ||
599 | * cr3 --> [ 4 Upper ] | ||
600 | * [ Level ] | ||
601 | * [ Entries ] | ||
602 | * [(PUD Page)]---> +---------+ | ||
603 | * | --------->+---------+ | ||
604 | * | | | PADDR1 | | ||
605 | * Mid-level | | PADDR2 | | ||
606 | * (PMD) page | | | | ||
607 | * | | Lower-level | | ||
608 | * | | (PTE) page | | ||
609 | * | | | | | ||
610 | * .... .... | ||
611 | * | ||
612 | * | ||
613 | * And the virtual address is decoded as: | ||
614 | * | ||
615 | * 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ||
616 | * |<-2->|<--- 9 bits ---->|<---- 9 bits --->|<------ 12 bits ------>| | ||
617 | * Index into Index into mid Index into lower Offset within page | ||
618 | * top entries directory page pagetable page | ||
619 | * | ||
620 | * It's too hard to switch between these two formats at runtime, so Linux only | ||
621 | * supports one or the other depending on whether CONFIG_X86_PAE is set. Many | ||
622 | * distributions turn it on, and not just for people with silly amounts of | ||
623 | * memory: the larger PTE entries allow room for the NX bit, which lets the | ||
624 | * kernel disable execution of pages and increase security. | ||
625 | * | ||
626 | * This was a problem for lguest, which couldn't run on these distributions; | ||
627 | * then Matias Zabaljauregui figured it all out and implemented it, and only a | ||
628 | * handful of puppies were crushed in the process! | ||
629 | * | ||
630 | * Back to our point: the kernel spends a lot of time changing both the | ||
631 | * top-level page directory and lower-level pagetable pages. The Guest doesn't | ||
632 | * know physical addresses, so while it maintains these page tables exactly | ||
633 | * like normal, it also needs to keep the Host informed whenever it makes a | ||
634 | * change: the Host will create the real page tables based on the Guests'. | ||
539 | */ | 635 | */ |
540 | 636 | ||
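Decoding a virtual address into those indices is just shifts and masks; a small sketch covering both layouts described above (the address is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t vaddr = 0xC0101234;

        /* Classic two-level layout: 10 + 10 + 12 bits. */
        printf("2-level: pgd=%u pte=%u offset=%u\n",
               vaddr >> 22, (vaddr >> 12) & 0x3FF, vaddr & 0xFFF);

        /* PAE layout: 2 + 9 + 9 + 12 bits. */
        printf("PAE: top=%u pmd=%u pte=%u offset=%u\n",
               vaddr >> 30, (vaddr >> 21) & 0x1FF,
               (vaddr >> 12) & 0x1FF, vaddr & 0xFFF);
        return 0;
    }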
541 | /* The Guest calls this to set a second-level entry (pte), ie. to map a page | 637 | /* |
542 | * into a process' address space. We set the entry then tell the Host the | 638 | * The Guest calls this after it has set a second-level entry (pte), ie. to map |
543 | * toplevel and address this corresponds to. The Guest uses one pagetable per | 639 | * a page into a process' address space. We tell the Host the toplevel and |
544 | * process, so we need to tell the Host which one we're changing (mm->pgd). */ | 640 | * address this corresponds to. The Guest uses one pagetable per process, so |
641 | * we need to tell the Host which one we're changing (mm->pgd). | ||
642 | */ | ||
545 | static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, | 643 | static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, |
546 | pte_t *ptep) | 644 | pte_t *ptep) |
547 | { | 645 | { |
548 | #ifdef CONFIG_X86_PAE | 646 | #ifdef CONFIG_X86_PAE |
647 | /* PAE needs to hand over a 64-bit page table entry, so it uses two args. */ | ||
549 | lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr, | 648 | lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr, |
550 | ptep->pte_low, ptep->pte_high); | 649 | ptep->pte_low, ptep->pte_high); |
551 | #else | 650 | #else |
@@ -553,6 +652,7 @@ static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, | |||
553 | #endif | 652 | #endif |
554 | } | 653 | } |
555 | 654 | ||
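Why two hypercall arguments under PAE? Each argument is a 32-bit word, and a PAE entry is 64 bits, so it travels as a low/high pair. The split itself (with a made-up entry value):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pte = 0x8000000012345067ULL;   /* made-up: NX bit + frame */
        uint32_t pte_low  = (uint32_t)pte;
        uint32_t pte_high = (uint32_t)(pte >> 32);

        /* These two words become the hypercall's last two arguments. */
        printf("low=%#010x high=%#010x\n", pte_low, pte_high);
        return 0;
    }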
655 | /* This is the "set and update" combo-meal-deal version. */ | ||
556 | static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, | 656 | static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, |
557 | pte_t *ptep, pte_t pteval) | 657 | pte_t *ptep, pte_t pteval) |
558 | { | 658 | { |
@@ -560,10 +660,13 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
560 | lguest_pte_update(mm, addr, ptep); | 660 | lguest_pte_update(mm, addr, ptep); |
561 | } | 661 | } |
562 | 662 | ||
563 | /* The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd | 663 | /* |
664 | * The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd | ||
564 | * to set a middle-level entry when PAE is activated. | 665 | * to set a middle-level entry when PAE is activated. |
666 | * | ||
565 | * Again, we set the entry then tell the Host which page we changed, | 667 | * Again, we set the entry then tell the Host which page we changed, |
566 | * and the index of the entry we changed. */ | 668 | * and the index of the entry we changed. |
669 | */ | ||
567 | #ifdef CONFIG_X86_PAE | 670 | #ifdef CONFIG_X86_PAE |
568 | static void lguest_set_pud(pud_t *pudp, pud_t pudval) | 671 | static void lguest_set_pud(pud_t *pudp, pud_t pudval) |
569 | { | 672 | { |
@@ -582,8 +685,7 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
582 | } | 685 | } |
583 | #else | 686 | #else |
584 | 687 | ||
585 | /* The Guest calls lguest_set_pmd to set a top-level entry when PAE is not | 688 | /* The Guest calls lguest_set_pmd to set a top-level entry when !PAE. */ |
586 | * activated. */ | ||
587 | static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | 689 | static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) |
588 | { | 690 | { |
589 | native_set_pmd(pmdp, pmdval); | 691 | native_set_pmd(pmdp, pmdval); |
@@ -592,7 +694,8 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
592 | } | 694 | } |
593 | #endif | 695 | #endif |
594 | 696 | ||
595 | /* There are a couple of legacy places where the kernel sets a PTE, but we | 697 | /* |
698 | * There are a couple of legacy places where the kernel sets a PTE, but we | ||
596 | * don't know the top level any more. This is useless for us, since we don't | 699 | * don't know the top level any more. This is useless for us, since we don't |
597 | * know which pagetable is changing or what address, so we just tell the Host | 700 | * know which pagetable is changing or what address, so we just tell the Host |
598 | * to forget all of them. Fortunately, this is very rare. | 701 | * to forget all of them. Fortunately, this is very rare. |
@@ -600,7 +703,8 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
600 | * ... except in early boot when the kernel sets up the initial pagetables, | 703 | * ... except in early boot when the kernel sets up the initial pagetables, |
601 | * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell | 704 | * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell |
602 | * the Host anything changed until we've done the first page table switch, | 705 | * the Host anything changed until we've done the first page table switch, |
603 | * which brings boot back to 0.25 seconds. */ | 706 | * which brings boot back to 0.25 seconds. |
707 | */ | ||
604 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) | 708 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) |
605 | { | 709 | { |
606 | native_set_pte(ptep, pteval); | 710 | native_set_pte(ptep, pteval); |
@@ -609,6 +713,11 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval) | |||
609 | } | 713 | } |
610 | 714 | ||
611 | #ifdef CONFIG_X86_PAE | 715 | #ifdef CONFIG_X86_PAE |
716 | /* | ||
717 | * With 64-bit PTE values, we need to be careful setting them: if we set 32 | ||
718 | * bits at a time, the hardware could see a weird half-set entry. These | ||
719 | * versions ensure we update all 64 bits at once. | ||
720 | */ | ||
612 | static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte) | 721 | static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte) |
613 | { | 722 | { |
614 | native_set_pte_atomic(ptep, pte); | 723 | native_set_pte_atomic(ptep, pte); |
@@ -616,19 +725,21 @@ static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte) | |||
616 | lazy_hcall1(LHCALL_FLUSH_TLB, 1); | 725 | lazy_hcall1(LHCALL_FLUSH_TLB, 1); |
617 | } | 726 | } |
618 | 727 | ||
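The "weird half-set entry" hazard deserves a picture. If the two words of a 64-bit entry are stored one after the other, anything walking the table between the stores sees a hybrid of old and new. A single-threaded sketch that only shows the shape of the problem (values invented):

    #include <stdio.h>
    #include <stdint.h>

    union pae_pte {
        uint64_t whole;
        struct { uint32_t low, high; } half;
    };

    int main(void)
    {
        union pae_pte pte = { .whole = 0x00000001AAAAA067ULL };

        pte.half.low = 0xBBBBB067;
        /* ... right here the entry is half old, half new: on real
         * hardware the MMU could walk it at this very moment ... */
        printf("torn: %#018llx\n", (unsigned long long)pte.whole);
        pte.half.high = 0x00000002;
        printf("done: %#018llx\n", (unsigned long long)pte.whole);
        return 0;
    }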
619 | void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 728 | static void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, |
729 | pte_t *ptep) | ||
620 | { | 730 | { |
621 | native_pte_clear(mm, addr, ptep); | 731 | native_pte_clear(mm, addr, ptep); |
622 | lguest_pte_update(mm, addr, ptep); | 732 | lguest_pte_update(mm, addr, ptep); |
623 | } | 733 | } |
624 | 734 | ||
625 | void lguest_pmd_clear(pmd_t *pmdp) | 735 | static void lguest_pmd_clear(pmd_t *pmdp) |
626 | { | 736 | { |
627 | lguest_set_pmd(pmdp, __pmd(0)); | 737 | lguest_set_pmd(pmdp, __pmd(0)); |
628 | } | 738 | } |
629 | #endif | 739 | #endif |
630 | 740 | ||
631 | /* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on | 741 | /* |
742 | * Unfortunately for Lguest, the pv_mmu_ops for page tables were based on | ||
632 | * native page table operations. On native hardware you can set a new page | 743 | * native page table operations. On native hardware you can set a new page |
633 | * table entry whenever you want, but if you want to remove one you have to do | 744 | * table entry whenever you want, but if you want to remove one you have to do |
634 | * a TLB flush (a TLB is a little cache of page table entries kept by the CPU). | 745 | * a TLB flush (a TLB is a little cache of page table entries kept by the CPU). |
@@ -637,24 +748,29 @@ void lguest_pmd_clear(pmd_t *pmdp) | |||
637 | * called when a valid entry is written, not when it's removed (ie. marked not | 748 | * called when a valid entry is written, not when it's removed (ie. marked not |
638 | * present). Instead, this is where we come when the Guest wants to remove a | 749 | * present). Instead, this is where we come when the Guest wants to remove a |
639 | * page table entry: we tell the Host to set that entry to 0 (ie. the present | 750 | * page table entry: we tell the Host to set that entry to 0 (ie. the present |
640 | * bit is zero). */ | 751 | * bit is zero). |
752 | */ | ||
641 | static void lguest_flush_tlb_single(unsigned long addr) | 753 | static void lguest_flush_tlb_single(unsigned long addr) |
642 | { | 754 | { |
643 | /* Simply set it to zero: if it was not, it will fault back in. */ | 755 | /* Simply set it to zero: if it was not, it will fault back in. */ |
644 | lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); | 756 | lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); |
645 | } | 757 | } |
646 | 758 | ||
647 | /* This is what happens after the Guest has removed a large number of entries. | 759 | /* |
760 | * This is what happens after the Guest has removed a large number of entries. | ||
648 | * This tells the Host that any of the page table entries for userspace might | 761 | * This tells the Host that any of the page table entries for userspace might |
649 | * have changed, ie. virtual addresses below PAGE_OFFSET. */ | 762 | * have changed, ie. virtual addresses below PAGE_OFFSET. |
763 | */ | ||
650 | static void lguest_flush_tlb_user(void) | 764 | static void lguest_flush_tlb_user(void) |
651 | { | 765 | { |
652 | lazy_hcall1(LHCALL_FLUSH_TLB, 0); | 766 | lazy_hcall1(LHCALL_FLUSH_TLB, 0); |
653 | } | 767 | } |
654 | 768 | ||
655 | /* This is called when the kernel page tables have changed. That's not very | 769 | /* |
770 | * This is called when the kernel page tables have changed. That's not very | ||
656 | * common (unless the Guest is using highmem, which makes the Guest extremely | 771 | * common (unless the Guest is using highmem, which makes the Guest extremely |
657 | * slow), so it's worth separating this from the user flushing above. */ | 772 | * slow), so it's worth separating this from the user flushing above. |
773 | */ | ||
658 | static void lguest_flush_tlb_kernel(void) | 774 | static void lguest_flush_tlb_kernel(void) |
659 | { | 775 | { |
660 | lazy_hcall1(LHCALL_FLUSH_TLB, 1); | 776 | lazy_hcall1(LHCALL_FLUSH_TLB, 1); |
@@ -691,26 +807,38 @@ static struct irq_chip lguest_irq_controller = { | |||
691 | .unmask = enable_lguest_irq, | 807 | .unmask = enable_lguest_irq, |
692 | }; | 808 | }; |
693 | 809 | ||
694 | /* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware | 810 | /* |
811 | * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware | ||
695 | * interrupt (except 128, which is used for system calls), and then tells the | 812 | * interrupt (except 128, which is used for system calls), and then tells the |
696 | * Linux infrastructure that each interrupt is controlled by our level-based | 813 | * Linux infrastructure that each interrupt is controlled by our level-based |
697 | * lguest interrupt controller. */ | 814 | * lguest interrupt controller. |
815 | */ | ||
698 | static void __init lguest_init_IRQ(void) | 816 | static void __init lguest_init_IRQ(void) |
699 | { | 817 | { |
700 | unsigned int i; | 818 | unsigned int i; |
701 | 819 | ||
702 | for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { | 820 | for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { |
703 | /* Some systems map "vectors" to interrupts weirdly. Lguest has | 821 | /* Some systems map "vectors" to interrupts weirdly. Not us! */ |
704 | * a straightforward 1 to 1 mapping, so force that here. */ | ||
705 | __get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR; | 822 | __get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR; |
706 | if (i != SYSCALL_VECTOR) | 823 | if (i != SYSCALL_VECTOR) |
707 | set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); | 824 | set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); |
708 | } | 825 | } |
709 | /* This call is required to set up for 4k stacks, where we have | 826 | |
710 | * separate stacks for hard and soft interrupts. */ | 827 | /* |
828 | * This call is required to set up for 4k stacks, where we have | ||
829 | * separate stacks for hard and soft interrupts. | ||
830 | */ | ||
711 | irq_ctx_init(smp_processor_id()); | 831 | irq_ctx_init(smp_processor_id()); |
712 | } | 832 | } |
713 | 833 | ||
834 | /* | ||
835 | * With CONFIG_SPARSE_IRQ, interrupt descriptors are allocated as-needed, so | ||
836 | * rather than set them in lguest_init_IRQ we are called here every time an | ||
837 | * lguest device needs an interrupt. | ||
838 | * | ||
839 | * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should | ||
840 | * pass that up! | ||
841 | */ | ||
714 | void lguest_setup_irq(unsigned int irq) | 842 | void lguest_setup_irq(unsigned int irq) |
715 | { | 843 | { |
716 | irq_to_desc_alloc_node(irq, 0); | 844 | irq_to_desc_alloc_node(irq, 0); |
@@ -729,31 +857,39 @@ static unsigned long lguest_get_wallclock(void) | |||
729 | return lguest_data.time.tv_sec; | 857 | return lguest_data.time.tv_sec; |
730 | } | 858 | } |
731 | 859 | ||
732 | /* The TSC is an Intel thing called the Time Stamp Counter. The Host tells us | 860 | /* |
861 | * The TSC is an Intel thing called the Time Stamp Counter. The Host tells us | ||
733 | * what speed it runs at, or 0 if it's unusable as a reliable clock source. | 862 | * what speed it runs at, or 0 if it's unusable as a reliable clock source. |
734 | * This matches what we want here: if we return 0 from this function, the x86 | 863 | * This matches what we want here: if we return 0 from this function, the x86 |
735 | * TSC clock will give up and not register itself. */ | 864 | * TSC clock will give up and not register itself. |
865 | */ | ||
736 | static unsigned long lguest_tsc_khz(void) | 866 | static unsigned long lguest_tsc_khz(void) |
737 | { | 867 | { |
738 | return lguest_data.tsc_khz; | 868 | return lguest_data.tsc_khz; |
739 | } | 869 | } |
740 | 870 | ||
741 | /* If we can't use the TSC, the kernel falls back to our lower-priority | 871 | /* |
742 | * "lguest_clock", where we read the time value given to us by the Host. */ | 872 | * If we can't use the TSC, the kernel falls back to our lower-priority |
873 | * "lguest_clock", where we read the time value given to us by the Host. | ||
874 | */ | ||
743 | static cycle_t lguest_clock_read(struct clocksource *cs) | 875 | static cycle_t lguest_clock_read(struct clocksource *cs) |
744 | { | 876 | { |
745 | unsigned long sec, nsec; | 877 | unsigned long sec, nsec; |
746 | 878 | ||
747 | /* Since the time is in two parts (seconds and nanoseconds), we risk | 879 | /* |
880 | * Since the time is in two parts (seconds and nanoseconds), we risk | ||
748 | * reading it just as it's changing from 99 & 0.999999999 to 100 and 0, | 881 | * reading it just as it's changing from 99 & 0.999999999 to 100 and 0, |
749 | * and getting 99 and 0. As Linux tends to come apart under the stress | 882 | * and getting 99 and 0. As Linux tends to come apart under the stress |
750 | * of time travel, we must be careful: */ | 883 | * of time travel, we must be careful: |
884 | */ | ||
751 | do { | 885 | do { |
752 | /* First we read the seconds part. */ | 886 | /* First we read the seconds part. */ |
753 | sec = lguest_data.time.tv_sec; | 887 | sec = lguest_data.time.tv_sec; |
754 | /* This read memory barrier tells the compiler and the CPU that | 888 | /* |
889 | * This read memory barrier tells the compiler and the CPU that | ||
755 | * this can't be reordered: we have to complete the above | 890 | * this can't be reordered: we have to complete the above |
756 | * before going on. */ | 891 | * before going on. |
892 | */ | ||
757 | rmb(); | 893 | rmb(); |
758 | /* Now we read the nanoseconds part. */ | 894 | /* Now we read the nanoseconds part. */ |
759 | nsec = lguest_data.time.tv_nsec; | 895 | nsec = lguest_data.time.tv_nsec; |
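The rest of the retry loop falls outside this hunk, but the comment above gives the game away: it is the familiar read-twice-and-retry discipline. A self-contained sketch of that discipline (the racing writer is imaginary here, and __sync_synchronize() stands in for the kernel's rmb()):

    #include <stdio.h>

    struct two_part_time {
        volatile long sec;
        volatile long nsec;
    };

    /* Retry until sec is the same before and after reading nsec: then the
     * pair is consistent even if a writer raced with us. */
    static void read_time(struct two_part_time *t, long *sec, long *nsec)
    {
        do {
            *sec = t->sec;
            __sync_synchronize();   /* stand-in for rmb() */
            *nsec = t->nsec;
            __sync_synchronize();
        } while (*sec != t->sec);
    }

    int main(void)
    {
        struct two_part_time t = { 99, 999999999 };
        long sec, nsec;

        read_time(&t, &sec, &nsec);
        printf("%ld.%09ld\n", sec, nsec);
        return 0;
    }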
@@ -777,9 +913,11 @@ static struct clocksource lguest_clock = { | |||
777 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 913 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
778 | }; | 914 | }; |
779 | 915 | ||
780 | /* We also need a "struct clock_event_device": Linux asks us to set it to go | 916 | /* |
917 | * We also need a "struct clock_event_device": Linux asks us to set it to go | ||
781 | * off some time in the future. Actually, James Morris figured all this out, I | 918 | * off some time in the future. Actually, James Morris figured all this out, I |
782 | * just applied the patch. */ | 919 | * just applied the patch. |
920 | */ | ||
783 | static int lguest_clockevent_set_next_event(unsigned long delta, | 921 | static int lguest_clockevent_set_next_event(unsigned long delta, |
784 | struct clock_event_device *evt) | 922 | struct clock_event_device *evt) |
785 | { | 923 | { |
@@ -829,8 +967,10 @@ static struct clock_event_device lguest_clockevent = { | |||
829 | .max_delta_ns = LG_CLOCK_MAX_DELTA, | 967 | .max_delta_ns = LG_CLOCK_MAX_DELTA, |
830 | }; | 968 | }; |
831 | 969 | ||
832 | /* This is the Guest timer interrupt handler (hardware interrupt 0). We just | 970 | /* |
833 | * call the clockevent infrastructure and it does whatever needs doing. */ | 971 | * This is the Guest timer interrupt handler (hardware interrupt 0). We just |
972 | * call the clockevent infrastructure and it does whatever needs doing. | ||
973 | */ | ||
834 | static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) | 974 | static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) |
835 | { | 975 | { |
836 | unsigned long flags; | 976 | unsigned long flags; |
@@ -841,10 +981,12 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) | |||
841 | local_irq_restore(flags); | 981 | local_irq_restore(flags); |
842 | } | 982 | } |
843 | 983 | ||
844 | /* At some point in the boot process, we get asked to set up our timing | 984 | /* |
985 | * At some point in the boot process, we get asked to set up our timing | ||
845 | * infrastructure. The kernel doesn't expect timer interrupts before this, but | 986 | * infrastructure. The kernel doesn't expect timer interrupts before this, but |
846 | * we cleverly initialized the "blocked_interrupts" field of "struct | 987 | * we cleverly initialized the "blocked_interrupts" field of "struct |
847 | * lguest_data" so that timer interrupts were blocked until now. */ | 988 | * lguest_data" so that timer interrupts were blocked until now. |
989 | */ | ||
848 | static void lguest_time_init(void) | 990 | static void lguest_time_init(void) |
849 | { | 991 | { |
850 | /* Set up the timer interrupt (0) to go to our simple timer routine */ | 992 | /* Set up the timer interrupt (0) to go to our simple timer routine */ |
@@ -868,14 +1010,16 @@ static void lguest_time_init(void) | |||
868 | * to work. They're pretty simple. | 1010 | * to work. They're pretty simple. |
869 | */ | 1011 | */ |
870 | 1012 | ||
871 | /* The Guest needs to tell the Host what stack it expects traps to use. For | 1013 | /* |
1014 | * The Guest needs to tell the Host what stack it expects traps to use. For | ||
872 | * native hardware, this is part of the Task State Segment mentioned above in | 1015 | * native hardware, this is part of the Task State Segment mentioned above in |
873 | * lguest_load_tr_desc(), but to help hypervisors there's this special call. | 1016 | * lguest_load_tr_desc(), but to help hypervisors there's this special call. |
874 | * | 1017 | * |
875 | * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data | 1018 | * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data |
876 | * segment), the privilege level (we're privilege level 1, the Host is 0 and | 1019 | * segment), the privilege level (we're privilege level 1, the Host is 0 and |
877 | * will not tolerate us trying to use that), the stack pointer, and the number | 1020 | * will not tolerate us trying to use that), the stack pointer, and the number |
878 | * of pages in the stack. */ | 1021 | * of pages in the stack. |
1022 | */ | ||
879 | static void lguest_load_sp0(struct tss_struct *tss, | 1023 | static void lguest_load_sp0(struct tss_struct *tss, |
880 | struct thread_struct *thread) | 1024 | struct thread_struct *thread) |
881 | { | 1025 | { |
@@ -889,7 +1033,8 @@ static void lguest_set_debugreg(int regno, unsigned long value) | |||
889 | /* FIXME: Implement */ | 1033 | /* FIXME: Implement */ |
890 | } | 1034 | } |
891 | 1035 | ||
892 | /* There are times when the kernel wants to make sure that no memory writes are | 1036 | /* |
1037 | * There are times when the kernel wants to make sure that no memory writes are | ||
893 | * caught in the cache (that they've all reached real hardware devices). This | 1038 | * caught in the cache (that they've all reached real hardware devices). This |
894 | * doesn't matter for the Guest which has virtual hardware. | 1039 | * doesn't matter for the Guest which has virtual hardware. |
895 | * | 1040 | * |
@@ -903,11 +1048,13 @@ static void lguest_wbinvd(void) | |||
903 | { | 1048 | { |
904 | } | 1049 | } |
905 | 1050 | ||
906 | /* If the Guest expects to have an Advanced Programmable Interrupt Controller, | 1051 | /* |
1052 | * If the Guest expects to have an Advanced Programmable Interrupt Controller, | ||
907 | * we play dumb by ignoring writes and returning 0 for reads. So it's no | 1053 | * we play dumb by ignoring writes and returning 0 for reads. So it's no |
908 | * longer Programmable nor Controlling anything, and I don't think 8 lines of | 1054 | * longer Programmable nor Controlling anything, and I don't think 8 lines of |
909 | * code qualifies for Advanced. It will also never interrupt anything. It | 1055 | * code qualifies for Advanced. It will also never interrupt anything. It |
910 | * does, however, allow us to get through the Linux boot code. */ | 1056 | * does, however, allow us to get through the Linux boot code. |
1057 | */ | ||
911 | #ifdef CONFIG_X86_LOCAL_APIC | 1058 | #ifdef CONFIG_X86_LOCAL_APIC |
912 | static void lguest_apic_write(u32 reg, u32 v) | 1059 | static void lguest_apic_write(u32 reg, u32 v) |
913 | { | 1060 | { |
@@ -956,11 +1103,13 @@ static void lguest_safe_halt(void) | |||
956 | kvm_hypercall0(LHCALL_HALT); | 1103 | kvm_hypercall0(LHCALL_HALT); |
957 | } | 1104 | } |
958 | 1105 | ||
959 | /* The SHUTDOWN hypercall takes a string to describe what's happening, and | 1106 | /* |
1107 | * The SHUTDOWN hypercall takes a string to describe what's happening, and | ||
960 | * an argument which says whether this to restart (reboot) the Guest or not. | 1108 | * an argument which says whether this is to restart (reboot) the Guest or not. |
961 | * | 1109 | * |
962 | * Note that the Host always prefers that the Guest speak in physical addresses | 1110 | * Note that the Host always prefers that the Guest speak in physical addresses |
963 | * rather than virtual addresses, so we use __pa() here. */ | 1111 | * rather than virtual addresses, so we use __pa() here. |
1112 | */ | ||
964 | static void lguest_power_off(void) | 1113 | static void lguest_power_off(void) |
965 | { | 1114 | { |
966 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), | 1115 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), |
@@ -991,8 +1140,10 @@ static __init char *lguest_memory_setup(void) | |||
991 | * nice to move it back to lguest_init. Patch welcome... */ | 1140 | * nice to move it back to lguest_init. Patch welcome... */ |
992 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); | 1141 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); |
993 | 1142 | ||
994 | /* The Linux bootloader header contains an "e820" memory map: the | 1143 | /* |
995 | * Launcher populated the first entry with our memory limit. */ | 1144 | * The Linux bootloader header contains an "e820" memory map: the |
1145 | * Launcher populated the first entry with our memory limit. | ||
1146 | */ | ||
996 | e820_add_region(boot_params.e820_map[0].addr, | 1147 | e820_add_region(boot_params.e820_map[0].addr, |
997 | boot_params.e820_map[0].size, | 1148 | boot_params.e820_map[0].size, |
998 | boot_params.e820_map[0].type); | 1149 | boot_params.e820_map[0].type); |
@@ -1001,16 +1152,17 @@ static __init char *lguest_memory_setup(void) | |||
1001 | return "LGUEST"; | 1152 | return "LGUEST"; |
1002 | } | 1153 | } |
1003 | 1154 | ||
1004 | /* We will eventually use the virtio console device to produce console output, | 1155 | /* |
1156 | * We will eventually use the virtio console device to produce console output, | ||
1005 | * but before that is set up we use LHCALL_NOTIFY on normal memory to produce | 1157 | * but before that is set up we use LHCALL_NOTIFY on normal memory to produce |
1006 | * console output. */ | 1158 | * console output. |
1159 | */ | ||
1007 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) | 1160 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) |
1008 | { | 1161 | { |
1009 | char scratch[17]; | 1162 | char scratch[17]; |
1010 | unsigned int len = count; | 1163 | unsigned int len = count; |
1011 | 1164 | ||
1012 | /* We use a nul-terminated string, so we have to make a copy. Icky, | 1165 | /* We use a nul-terminated string, so we make a copy. Icky, huh? */ |
1013 | * huh? */ | ||
1014 | if (len > sizeof(scratch) - 1) | 1166 | if (len > sizeof(scratch) - 1) |
1015 | len = sizeof(scratch) - 1; | 1167 | len = sizeof(scratch) - 1; |
1016 | scratch[len] = '\0'; | 1168 | scratch[len] = '\0'; |
@@ -1021,8 +1173,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
1021 | return len; | 1173 | return len; |
1022 | } | 1174 | } |
1023 | 1175 | ||
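Pushing a longer message through that 17-byte scratch buffer is a matter of chunking. A userspace sketch of the same loop (notify() is a printf stand-in for the LHCALL_NOTIFY hypercall; the real early_put_chars() emits one chunk per call and leaves the looping to its caller):

    #include <stdio.h>
    #include <string.h>

    static void notify(const char *s)
    {
        fputs(s, stdout);       /* stand-in for the NOTIFY hypercall */
    }

    static void put_chars(const char *buf, unsigned int count)
    {
        char scratch[17];

        while (count) {
            unsigned int len = count;

            if (len > sizeof(scratch) - 1)
                len = sizeof(scratch) - 1;
            scratch[len] = '\0';
            memcpy(scratch, buf, len);
            notify(scratch);
            buf += len;
            count -= len;
        }
    }

    int main(void)
    {
        const char *msg = "Hello from the early console!\n";
        put_chars(msg, strlen(msg));
        return 0;
    }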
1024 | /* Rebooting also tells the Host we're finished, but the RESTART flag tells the | 1176 | /* |
1025 | * Launcher to reboot us. */ | 1177 | * Rebooting also tells the Host we're finished, but the RESTART flag tells the |
1178 | * Launcher to reboot us. | ||
1179 | */ | ||
1026 | static void lguest_restart(char *reason) | 1180 | static void lguest_restart(char *reason) |
1027 | { | 1181 | { |
1028 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); | 1182 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); |
@@ -1049,7 +1203,8 @@ static void lguest_restart(char *reason) | |||
1049 | * fit comfortably. | 1203 | * fit comfortably. |
1050 | * | 1204 | * |
1051 | * First we need assembly templates of each of the patchable Guest operations, | 1205 | * First we need assembly templates of each of the patchable Guest operations, |
1052 | * and these are in i386_head.S. */ | 1206 | * and these are in i386_head.S. |
1207 | */ | ||
1053 | 1208 | ||
1054 | /*G:060 We construct a table from the assembler templates: */ | 1209 | /*G:060 We construct a table from the assembler templates: */ |
1055 | static const struct lguest_insns | 1210 | static const struct lguest_insns |
@@ -1060,9 +1215,11 @@ static const struct lguest_insns | |||
1060 | [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf }, | 1215 | [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf }, |
1061 | }; | 1216 | }; |
1062 | 1217 | ||
1063 | /* Now our patch routine is fairly simple (based on the native one in | 1218 | /* |
1219 | * Now our patch routine is fairly simple (based on the native one in | ||
1064 | * paravirt.c). If we have a replacement, we copy it in and return how much of | 1220 | * paravirt.c). If we have a replacement, we copy it in and return how much of |
1065 | * the available space we used. */ | 1221 | * the available space we used. |
1222 | */ | ||
1066 | static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf, | 1223 | static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf, |
1067 | unsigned long addr, unsigned len) | 1224 | unsigned long addr, unsigned len) |
1068 | { | 1225 | { |
@@ -1074,8 +1231,7 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf, | |||
1074 | 1231 | ||
1075 | insn_len = lguest_insns[type].end - lguest_insns[type].start; | 1232 | insn_len = lguest_insns[type].end - lguest_insns[type].start; |
1076 | 1233 | ||
1077 | /* Similarly if we can't fit replacement (shouldn't happen, but let's | 1234 | /* Similarly if it can't fit (doesn't happen, but let's be thorough). */ |
1078 | * be thorough). */ | ||
1079 | if (len < insn_len) | 1235 | if (len < insn_len) |
1080 | return paravirt_patch_default(type, clobber, ibuf, addr, len); | 1236 | return paravirt_patch_default(type, clobber, ibuf, addr, len); |
1081 | 1237 | ||
@@ -1084,22 +1240,28 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf, | |||
1084 | return insn_len; | 1240 | return insn_len; |
1085 | } | 1241 | } |
1086 | 1242 | ||
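Stripped of the paravirt plumbing, the patcher is a bounded copy plus a length. A sketch with a byte array standing in for one lgstart_/lgend_ template pair (the bytes are arbitrary):

    #include <stdio.h>
    #include <string.h>

    /* Arbitrary bytes standing in for one assembler template. */
    static const unsigned char tmpl[] = { 0xc7, 0x05, 0, 0, 0, 0, 0, 0, 0, 0 };

    static unsigned patch(unsigned char *ibuf, unsigned len,
                          const unsigned char *start, const unsigned char *end)
    {
        unsigned insn_len = end - start;

        if (len < insn_len)
            return 0;           /* caller falls back to the default patcher */
        memcpy(ibuf, start, insn_len);
        return insn_len;
    }

    int main(void)
    {
        unsigned char buf[16];
        unsigned used = patch(buf, sizeof(buf), tmpl, tmpl + sizeof(tmpl));

        printf("patched %u bytes\n", used);
        return 0;
    }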
1087 | /*G:029 Once we get to lguest_init(), we know we're a Guest. The various | 1243 | /*G:029 |
1244 | * Once we get to lguest_init(), we know we're a Guest. The various | ||
1088 | * pv_ops structures in the kernel provide points for (almost) every routine we | 1245 | * pv_ops structures in the kernel provide points for (almost) every routine we |
1089 | * have to override to avoid privileged instructions. */ | 1246 | * have to override to avoid privileged instructions. |
1247 | */ | ||
1090 | __init void lguest_init(void) | 1248 | __init void lguest_init(void) |
1091 | { | 1249 | { |
1092 | /* We're under lguest, paravirt is enabled, and we're running at | 1250 | /* We're under lguest. */ |
1093 | * privilege level 1, not 0 as normal. */ | ||
1094 | pv_info.name = "lguest"; | 1251 | pv_info.name = "lguest"; |
1252 | /* Paravirt is enabled. */ | ||
1095 | pv_info.paravirt_enabled = 1; | 1253 | pv_info.paravirt_enabled = 1; |
1254 | /* We're running at privilege level 1, not 0 as normal. */ | ||
1096 | pv_info.kernel_rpl = 1; | 1255 | pv_info.kernel_rpl = 1; |
1256 | /* Everyone except Xen runs with this set. */ | ||
1097 | pv_info.shared_kernel_pmd = 1; | 1257 | pv_info.shared_kernel_pmd = 1; |
1098 | 1258 | ||
1099 | /* We set up all the lguest overrides for sensitive operations. These | 1259 | /* |
1100 | * are detailed with the operations themselves. */ | 1260 | * We set up all the lguest overrides for sensitive operations. These |
1261 | * are detailed with the operations themselves. | ||
1262 | */ | ||
1101 | 1263 | ||
1102 | /* interrupt-related operations */ | 1264 | /* Interrupt-related operations */ |
1103 | pv_irq_ops.init_IRQ = lguest_init_IRQ; | 1265 | pv_irq_ops.init_IRQ = lguest_init_IRQ; |
1104 | pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl); | 1266 | pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl); |
1105 | pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl); | 1267 | pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl); |
@@ -1107,11 +1269,11 @@ __init void lguest_init(void) | |||
1107 | pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable); | 1269 | pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable); |
1108 | pv_irq_ops.safe_halt = lguest_safe_halt; | 1270 | pv_irq_ops.safe_halt = lguest_safe_halt; |
1109 | 1271 | ||
1110 | /* init-time operations */ | 1272 | /* Setup operations */ |
1111 | pv_init_ops.memory_setup = lguest_memory_setup; | 1273 | pv_init_ops.memory_setup = lguest_memory_setup; |
1112 | pv_init_ops.patch = lguest_patch; | 1274 | pv_init_ops.patch = lguest_patch; |
1113 | 1275 | ||
1114 | /* Intercepts of various cpu instructions */ | 1276 | /* Intercepts of various CPU instructions */ |
1115 | pv_cpu_ops.load_gdt = lguest_load_gdt; | 1277 | pv_cpu_ops.load_gdt = lguest_load_gdt; |
1116 | pv_cpu_ops.cpuid = lguest_cpuid; | 1278 | pv_cpu_ops.cpuid = lguest_cpuid; |
1117 | pv_cpu_ops.load_idt = lguest_load_idt; | 1279 | pv_cpu_ops.load_idt = lguest_load_idt; |
@@ -1132,7 +1294,7 @@ __init void lguest_init(void) | |||
1132 | pv_cpu_ops.start_context_switch = paravirt_start_context_switch; | 1294 | pv_cpu_ops.start_context_switch = paravirt_start_context_switch; |
1133 | pv_cpu_ops.end_context_switch = lguest_end_context_switch; | 1295 | pv_cpu_ops.end_context_switch = lguest_end_context_switch; |
1134 | 1296 | ||
1135 | /* pagetable management */ | 1297 | /* Pagetable management */ |
1136 | pv_mmu_ops.write_cr3 = lguest_write_cr3; | 1298 | pv_mmu_ops.write_cr3 = lguest_write_cr3; |
1137 | pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user; | 1299 | pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user; |
1138 | pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single; | 1300 | pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single; |
@@ -1154,54 +1316,71 @@ __init void lguest_init(void) | |||
1154 | pv_mmu_ops.pte_update_defer = lguest_pte_update; | 1316 | pv_mmu_ops.pte_update_defer = lguest_pte_update; |
1155 | 1317 | ||
1156 | #ifdef CONFIG_X86_LOCAL_APIC | 1318 | #ifdef CONFIG_X86_LOCAL_APIC |
1157 | /* apic read/write intercepts */ | 1319 | /* APIC read/write intercepts */ |
1158 | set_lguest_basic_apic_ops(); | 1320 | set_lguest_basic_apic_ops(); |
1159 | #endif | 1321 | #endif |
1160 | 1322 | ||
1161 | /* time operations */ | 1323 | /* Time operations */ |
1162 | pv_time_ops.get_wallclock = lguest_get_wallclock; | 1324 | pv_time_ops.get_wallclock = lguest_get_wallclock; |
1163 | pv_time_ops.time_init = lguest_time_init; | 1325 | pv_time_ops.time_init = lguest_time_init; |
1164 | pv_time_ops.get_tsc_khz = lguest_tsc_khz; | 1326 | pv_time_ops.get_tsc_khz = lguest_tsc_khz; |
1165 | 1327 | ||
1166 | /* Now is a good time to look at the implementations of these functions | 1328 | /* |
1167 | * before returning to the rest of lguest_init(). */ | 1329 | * Now is a good time to look at the implementations of these functions |
1330 | * before returning to the rest of lguest_init(). | ||
1331 | */ | ||
1168 | 1332 | ||
1169 | /*G:070 Now we've seen all the paravirt_ops, we return to | 1333 | /*G:070 |
1334 | * Now we've seen all the paravirt_ops, we return to | ||
1170 | * lguest_init() where the rest of the fairly chaotic boot setup | 1335 | * lguest_init() where the rest of the fairly chaotic boot setup |
1171 | * occurs. */ | 1336 | * occurs. |
1337 | */ | ||
1172 | 1338 | ||
1173 | /* The stack protector is a weird thing where gcc places a canary | 1339 | /* |
1340 | * The stack protector is a weird thing where gcc places a canary | ||
1174 | * value on the stack and then checks it on return. This file is | 1341 | * value on the stack and then checks it on return. This file is |
1175 | * compiled with -fno-stack-protector it, so we got this far without | 1342 | * compiled with -fno-stack-protector, so we got this far without |
1176 | * problems. The value of the canary is kept at offset 20 from the | 1343 | * problems. The value of the canary is kept at offset 20 from the |
1177 | * %gs register, so we need to set that up before calling C functions | 1344 | * %gs register, so we need to set that up before calling C functions |
1178 | * in other files. */ | 1345 | * in other files. |
1346 | */ | ||
1179 | setup_stack_canary_segment(0); | 1347 | setup_stack_canary_segment(0); |
1180 | /* We could just call load_stack_canary_segment(), but we might as | 1348 | |
1181 | * call switch_to_new_gdt() which loads the whole table and sets up | 1349 | /* |
1182 | * the per-cpu segment descriptor register %fs as well. */ | 1350 | * We could just call load_stack_canary_segment(), but we might as well |
1351 | * call switch_to_new_gdt() which loads the whole table and sets up the | ||
1352 | * per-cpu segment descriptor register %fs as well. | ||
1353 | */ | ||
1183 | switch_to_new_gdt(0); | 1354 | switch_to_new_gdt(0); |
1184 | 1355 | ||
1185 | /* As described in head_32.S, we map the first 128M of memory. */ | 1356 | /* We actually boot with all memory mapped, but let's say 128MB. */ |
1186 | max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT; | 1357 | max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT; |
1187 | 1358 | ||
1188 | /* The Host<->Guest Switcher lives at the top of our address space, and | 1359 | /* |
1360 | * The Host<->Guest Switcher lives at the top of our address space, and | ||
1189 | * the Host told us how big it is when we made LGUEST_INIT hypercall: | 1361 | * the Host told us how big it is when we made LGUEST_INIT hypercall: |
1190 | * it put the answer in lguest_data.reserve_mem */ | 1362 | * it put the answer in lguest_data.reserve_mem |
1363 | */ | ||
1191 | reserve_top_address(lguest_data.reserve_mem); | 1364 | reserve_top_address(lguest_data.reserve_mem); |
1192 | 1365 | ||
1193 | /* If we don't initialize the lock dependency checker now, it crashes | 1366 | /* |
1194 | * paravirt_disable_iospace. */ | 1367 | * If we don't initialize the lock dependency checker now, it crashes |
1368 | * paravirt_disable_iospace. | ||
1369 | */ | ||
1195 | lockdep_init(); | 1370 | lockdep_init(); |
1196 | 1371 | ||
1197 | /* The IDE code spends about 3 seconds probing for disks: if we reserve | 1372 | /* |
1373 | * The IDE code spends about 3 seconds probing for disks: if we reserve | ||
1198 | * all the I/O ports up front it can't get them and so doesn't probe. | 1374 | * all the I/O ports up front it can't get them and so doesn't probe. |
1199 | * Other device drivers are similar (but less severe). This cuts the | 1375 | * Other device drivers are similar (but less severe). This cuts the |
1200 | * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */ | 1376 | * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. |
1377 | */ | ||
1201 | paravirt_disable_iospace(); | 1378 | paravirt_disable_iospace(); |
1202 | 1379 | ||
1203 | /* This is messy CPU setup stuff which the native boot code does before | 1380 | /* |
1204 | * start_kernel, so we have to do, too: */ | 1381 | * This is messy CPU setup stuff which the native boot code does before |
1382 | * start_kernel, so we have to do it, too: | ||
1383 | */ | ||
1205 | cpu_detect(&new_cpu_data); | 1384 | cpu_detect(&new_cpu_data); |
1206 | /* head.S usually sets up the first capability word, so do it here. */ | 1385 | /* head.S usually sets up the first capability word, so do it here. */ |
1207 | new_cpu_data.x86_capability[0] = cpuid_edx(1); | 1386 | new_cpu_data.x86_capability[0] = cpuid_edx(1); |
@@ -1218,22 +1397,28 @@ __init void lguest_init(void) | |||
1218 | acpi_ht = 0; | 1397 | acpi_ht = 0; |
1219 | #endif | 1398 | #endif |
1220 | 1399 | ||
1221 | /* We set the preferred console to "hvc". This is the "hypervisor | 1400 | /* |
1401 | * We set the preferred console to "hvc". This is the "hypervisor | ||
1222 | * virtual console" driver written by the PowerPC people, which we also | 1402 | * virtual console" driver written by the PowerPC people, which we also |
1223 | * adapted for lguest's use. */ | 1403 | * adapted for lguest's use. |
1404 | */ | ||
1224 | add_preferred_console("hvc", 0, NULL); | 1405 | add_preferred_console("hvc", 0, NULL); |
1225 | 1406 | ||
1226 | /* Register our very early console. */ | 1407 | /* Register our very early console. */ |
1227 | virtio_cons_early_init(early_put_chars); | 1408 | virtio_cons_early_init(early_put_chars); |
1228 | 1409 | ||
1229 | /* Last of all, we set the power management poweroff hook to point to | 1410 | /* |
1411 | * Last of all, we set the power management poweroff hook to point to | ||
1230 | * the Guest routine to power off, and the reboot hook to our restart | 1412 | * the Guest routine to power off, and the reboot hook to our restart |
1231 | * routine. */ | 1413 | * routine. |
1414 | */ | ||
1232 | pm_power_off = lguest_power_off; | 1415 | pm_power_off = lguest_power_off; |
1233 | machine_ops.restart = lguest_restart; | 1416 | machine_ops.restart = lguest_restart; |
1234 | 1417 | ||
1235 | /* Now we're set up, call i386_start_kernel() in head32.c and we proceed | 1418 | /* |
1236 | * to boot as normal. It never returns. */ | 1419 | * Now we're set up, call i386_start_kernel() in head32.c and we proceed |
1420 | * to boot as normal. It never returns. | ||
1421 | */ | ||
1237 | i386_start_kernel(); | 1422 | i386_start_kernel(); |
1238 | } | 1423 | } |
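Stepping back, the whole of lguest_init() follows one pattern: start from the native function pointers and overwrite only the sensitive ones. A toy version of that pattern (the ops structure and names are invented for illustration):

    #include <stdio.h>

    struct toy_ops {
        void (*halt)(void);
        void (*flush_tlb)(void);
    };

    static void native_halt(void)      { puts("hlt instruction"); }
    static void native_flush_tlb(void) { puts("reload cr3"); }
    static void guest_halt(void)       { puts("LHCALL_HALT hypercall"); }

    /* Boot starts with the native implementations... */
    static struct toy_ops ops = { native_halt, native_flush_tlb };

    int main(void)
    {
        /* ...and an lguest_init() analogue overrides only what must change. */
        ops.halt = guest_halt;

        ops.halt();         /* now a hypercall */
        ops.flush_tlb();    /* still native */
        return 0;
    }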
1239 | /* | 1424 | /* |
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index a9c8cfe61cd4..27eac0faee48 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S | |||
@@ -5,7 +5,8 @@ | |||
5 | #include <asm/thread_info.h> | 5 | #include <asm/thread_info.h> |
6 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
7 | 7 | ||
8 | /*G:020 Our story starts with the kernel booting into startup_32 in | 8 | /*G:020 |
9 | * Our story starts with the kernel booting into startup_32 in | ||
9 | * arch/x86/kernel/head_32.S. It expects a boot header, which is created by | 10 | * arch/x86/kernel/head_32.S. It expects a boot header, which is created by |
10 | * the bootloader (the Launcher in our case). | 11 | * the bootloader (the Launcher in our case). |
11 | * | 12 | * |
@@ -21,11 +22,14 @@ | |||
21 | * data without remembering to subtract __PAGE_OFFSET! | 22 | * data without remembering to subtract __PAGE_OFFSET! |
22 | * | 23 | * |
23 | * The .section line puts this code in .init.text so it will be discarded after | 24 | * The .section line puts this code in .init.text so it will be discarded after |
24 | * boot. */ | 25 | * boot. |
26 | */ | ||
25 | .section .init.text, "ax", @progbits | 27 | .section .init.text, "ax", @progbits |
26 | ENTRY(lguest_entry) | 28 | ENTRY(lguest_entry) |
27 | /* We make the "initialization" hypercall now to tell the Host about | 29 | /* |
28 | * us, and also find out where it put our page tables. */ | 30 | * We make the "initialization" hypercall now to tell the Host about |
31 | * us, and also find out where it put our page tables. | ||
32 | */ | ||
29 | movl $LHCALL_LGUEST_INIT, %eax | 33 | movl $LHCALL_LGUEST_INIT, %eax |
30 | movl $lguest_data - __PAGE_OFFSET, %ebx | 34 | movl $lguest_data - __PAGE_OFFSET, %ebx |
31 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ | 35 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ |
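Those three opcode bytes are "vmcall", spelled out for assemblers that don't know the mnemonic. The C side makes the same call via inline asm; a sketch in the style of kvm_hypercall1() from asm/kvm_para.h (illustrative, not part of this hunk):

	static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
	{
		long ret;
		/*
		 * Call number in %eax, first argument in %ebx: exactly the
		 * register layout the assembler code above sets up.
		 */
		asm volatile(".byte 0x0f,0x01,0xc1"	/* KVM_HYPERCALL */
			     : "=a"(ret)
			     : "a"(nr), "b"(p1)
			     : "memory");
		return ret;
	}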
@@ -33,13 +37,14 @@ ENTRY(lguest_entry) | |||
33 | /* Set up the initial stack so we can run C code. */ | 37 | /* Set up the initial stack so we can run C code. */ |
34 | movl $(init_thread_union+THREAD_SIZE),%esp | 38 | movl $(init_thread_union+THREAD_SIZE),%esp |
35 | 39 | ||
36 | /* Jumps are relative, and we're running __PAGE_OFFSET too low at the | 40 | /* Jumps are relative: we're running __PAGE_OFFSET too low. */ |
37 | * moment. */ | ||
38 | jmp lguest_init+__PAGE_OFFSET | 41 | jmp lguest_init+__PAGE_OFFSET |
39 | 42 | ||
40 | /*G:055 We create a macro which puts the assembler code between lgstart_ and | 43 | /*G:055 |
41 | * lgend_ markers. These templates are put in the .text section: they can't be | 44 | * We create a macro which puts the assembler code between lgstart_ and lgend_ |
42 | * discarded after boot as we may need to patch modules, too. */ | 45 | * markers. These templates are put in the .text section: they can't be |
46 | * discarded after boot as we may need to patch modules, too. | ||
47 | */ | ||
43 | .text | 48 | .text |
44 | #define LGUEST_PATCH(name, insns...) \ | 49 | #define LGUEST_PATCH(name, insns...) \ |
45 | lgstart_##name: insns; lgend_##name:; \ | 50 | lgstart_##name: insns; lgend_##name:; \ |
@@ -48,83 +53,103 @@ ENTRY(lguest_entry) | |||
48 | LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled) | 53 | LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled) |
49 | LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) | 54 | LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) |
50 | 55 | ||
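On the C side, boot.c hands the paravirt patching machinery these marker pairs so the tiny bodies can be copied straight over call sites. Roughly, a sketch of that arrangement (condensed from memory, not part of this hunk):

	extern const char lgstart_cli[], lgend_cli[],
			  lgstart_pushf[], lgend_pushf[];

	/* Map each patchable operation to its lgstart_/lgend_ snippet. */
	static const struct lguest_insns {
		const char *start, *end;
	} lguest_insns[] = {
		[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
		[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
	};

	static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
				     unsigned long addr, unsigned len)
	{
		unsigned int insn_len;

		/* Don't do anything special if we don't have a replacement. */
		if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
			return paravirt_patch_default(type, clobber, ibuf, addr, len);

		insn_len = lguest_insns[type].end - lguest_insns[type].start;

		/* Similarly if the replacement doesn't fit. */
		if (len < insn_len)
			return paravirt_patch_default(type, clobber, ibuf, addr, len);

		/* Copy in our instructions. */
		memcpy(ibuf, lguest_insns[type].start, insn_len);
		return insn_len;
	}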
51 | /*G:033 But using those wrappers is inefficient (we'll see why that doesn't | 56 | /*G:033 |
52 | * matter for save_fl and irq_disable later). If we write our routines | 57 | * But using those wrappers is inefficient (we'll see why that doesn't matter |
53 | * carefully in assembler, we can avoid clobbering any registers and avoid | 58 | * for save_fl and irq_disable later). If we write our routines carefully in |
54 | * jumping through the wrapper functions. | 59 | * assembler, we can avoid clobbering any registers and avoid jumping through |
60 | * the wrapper functions. | ||
55 | * | 61 | * |
56 | * I skipped over our first piece of assembler, but this one is worth studying | 62 | * I skipped over our first piece of assembler, but this one is worth studying |
57 | * in a bit more detail so I'll describe in easy stages. First, the routine | 63 | * in a bit more detail so I'll describe in easy stages. First, the routine to |
58 | * to enable interrupts: */ | 64 | * enable interrupts: |
65 | */ | ||
59 | ENTRY(lg_irq_enable) | 66 | ENTRY(lg_irq_enable) |
60 | /* The reverse of irq_disable, this sets lguest_data.irq_enabled to | 67 | /* |
61 | * X86_EFLAGS_IF (ie. "Interrupts enabled"). */ | 68 | * The reverse of irq_disable, this sets lguest_data.irq_enabled to |
69 | * X86_EFLAGS_IF (ie. "Interrupts enabled"). | ||
70 | */ | ||
62 | movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled | 71 | movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled |
63 | /* But now we need to check if the Host wants to know: there might have | 72 | /* |
73 | * But now we need to check if the Host wants to know: there might have | ||
64 | * been interrupts waiting to be delivered, in which case it will have | 74 | * been interrupts waiting to be delivered, in which case it will have |
65 | * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we | 75 | * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we |
66 | * jump to send_interrupts, otherwise we're done. */ | 76 | * jump to send_interrupts, otherwise we're done. |
77 | */ | ||
67 | testl $0, lguest_data+LGUEST_DATA_irq_pending | 78 | testl $0, lguest_data+LGUEST_DATA_irq_pending |
68 | jnz send_interrupts | 79 | jnz send_interrupts |
69 | /* One cool thing about x86 is that you can do many things without using | 80 | /* |
81 | * One cool thing about x86 is that you can do many things without using | ||
70 | * a register. In this case, the normal path hasn't needed to save or | 82 | * a register. In this case, the normal path hasn't needed to save or |
71 | * restore any registers at all! */ | 83 | * restore any registers at all! |
84 | */ | ||
72 | ret | 85 | ret |
73 | send_interrupts: | 86 | send_interrupts: |
74 | /* OK, now we need a register: eax is used for the hypercall number, | 87 | /* |
88 | * OK, now we need a register: eax is used for the hypercall number, | ||
75 | * which is LHCALL_SEND_INTERRUPTS. | 89 | * which is LHCALL_SEND_INTERRUPTS. |
76 | * | 90 | * |
77 | * We used not to bother with this pending detection at all, which was | 91 | * We used not to bother with this pending detection at all, which was |
78 | * much simpler. Sooner or later the Host would realize it had to | 92 | * much simpler. Sooner or later the Host would realize it had to |
79 | * send us an interrupt. But that turns out to make performance 7 | 93 | * send us an interrupt. But that turns out to make performance 7 |
80 | * times worse on a simple tcp benchmark. So now we do this the hard | 94 | * times worse on a simple tcp benchmark. So now we do this the hard |
81 | * way. */ | 95 | * way. |
96 | */ | ||
82 | pushl %eax | 97 | pushl %eax |
83 | movl $LHCALL_SEND_INTERRUPTS, %eax | 98 | movl $LHCALL_SEND_INTERRUPTS, %eax |
84 | /* This is a vmcall instruction (same thing that KVM uses). Older | 99 | /* |
100 | * This is a vmcall instruction (same thing that KVM uses). Older | ||
85 | * assembler versions might not know the "vmcall" instruction, so we | 101 | * assembler versions might not know the "vmcall" instruction, so we |
86 | * create one manually here. */ | 102 | * create one manually here. |
103 | */ | ||
87 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ | 104 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ |
105 | /* Put eax back the way we found it. */ | ||
88 | popl %eax | 106 | popl %eax |
89 | ret | 107 | ret |
90 | 108 | ||
91 | /* Finally, the "popf" or "restore flags" routine. The %eax register holds the | 109 | /* |
110 | * Finally, the "popf" or "restore flags" routine. The %eax register holds the | ||
92 | * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're | 111 | * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're |
93 | * enabling interrupts again, if it's 0 we're leaving them off. */ | 112 | * enabling interrupts again, if it's 0 we're leaving them off. |
113 | */ | ||
94 | ENTRY(lg_restore_fl) | 114 | ENTRY(lg_restore_fl) |
95 | /* This is just "lguest_data.irq_enabled = flags;" */ | 115 | /* This is just "lguest_data.irq_enabled = flags;" */ |
96 | movl %eax, lguest_data+LGUEST_DATA_irq_enabled | 116 | movl %eax, lguest_data+LGUEST_DATA_irq_enabled |
97 | /* Now, if the %eax value has enabled interrupts and | 117 | /* |
118 | * Now, if the %eax value has enabled interrupts and | ||
98 | * lguest_data.irq_pending is set, we want to tell the Host so it can | 119 | * lguest_data.irq_pending is set, we want to tell the Host so it can |
99 | * deliver any outstanding interrupts. Fortunately, both values will | 120 | * deliver any outstanding interrupts. Fortunately, both values will |
100 | * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl" | 121 | * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl" |
101 | * instruction will AND them together for us. If both are set, we | 122 | * instruction will AND them together for us. If both are set, we |
102 | * jump to send_interrupts. */ | 123 | * jump to send_interrupts. |
124 | */ | ||
103 | testl lguest_data+LGUEST_DATA_irq_pending, %eax | 125 | testl lguest_data+LGUEST_DATA_irq_pending, %eax |
104 | jnz send_interrupts | 126 | jnz send_interrupts |
105 | /* Again, the normal path has used no extra registers. Clever, huh? */ | 127 | /* Again, the normal path has used no extra registers. Clever, huh? */ |
106 | ret | 128 | ret |
129 | /*:*/ | ||
107 | 130 | ||
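For contrast, the two operations where a lazy answer is harmless stay in C; a sketch of their boot.c counterparts (illustrative, not part of this hunk):

	/*
	 * Reading the flag and disabling interrupts only touch
	 * lguest_data, so no hypercall is ever needed.
	 */
	static unsigned long save_fl(void)
	{
		return lguest_data.irq_enabled;
	}

	static void irq_disable(void)
	{
		lguest_data.irq_enabled = 0;
	}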
108 | /* These demarcate the EIP range where the Host should never deliver interrupts. */ | 131 | /* These demarcate the EIP range where the Host should never deliver interrupts. */ |
109 | .global lguest_noirq_start | 132 | .global lguest_noirq_start |
110 | .global lguest_noirq_end | 133 | .global lguest_noirq_end |
111 | 134 | ||
112 | /*M:004 When the Host reflects a trap or injects an interrupt into the Guest, | 135 | /*M:004 |
113 | * it sets the eflags interrupt bit on the stack based on | 136 | * When the Host reflects a trap or injects an interrupt into the Guest, it |
114 | * lguest_data.irq_enabled, so the Guest iret logic does the right thing when | 137 | * sets the eflags interrupt bit on the stack based on lguest_data.irq_enabled, |
115 | * restoring it. However, when the Host sets the Guest up for direct traps, | 138 | * so the Guest iret logic does the right thing when restoring it. However, |
116 | * such as system calls, the processor is the one to push eflags onto the | 139 | * when the Host sets the Guest up for direct traps, such as system calls, the |
117 | * stack, and the interrupt bit will be 1 (in reality, interrupts are always | 140 | * processor is the one to push eflags onto the stack, and the interrupt bit |
118 | * enabled in the Guest). | 141 | * will be 1 (in reality, interrupts are always enabled in the Guest). |
119 | * | 142 | * |
120 | * This turns out to be harmless: the only trap which should happen under Linux | 143 | * This turns out to be harmless: the only trap which should happen under Linux |
121 | * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc | 144 | * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc |
122 | * regions), which has to be reflected through the Host anyway. If another | 145 | * regions), which has to be reflected through the Host anyway. If another |
123 | * trap *does* go off when interrupts are disabled, the Guest will panic, and | 146 | * trap *does* go off when interrupts are disabled, the Guest will panic, and |
124 | * we'll never get to this iret! :*/ | 147 | * we'll never get to this iret! |
148 | :*/ | ||
125 | 149 | ||
126 | /*G:045 There is one final paravirt_op that the Guest implements, and glancing | 150 | /*G:045 |
127 | * at it you can see why I left it to last. It's *cool*! It's in *assembler*! | 151 | * There is one final paravirt_op that the Guest implements, and glancing at it |
152 | * you can see why I left it to last. It's *cool*! It's in *assembler*! | ||
128 | * | 153 | * |
129 | * The "iret" instruction is used to return from an interrupt or trap. The | 154 | * The "iret" instruction is used to return from an interrupt or trap. The |
130 | * stack looks like this: | 155 | * stack looks like this: |
@@ -148,15 +173,18 @@ ENTRY(lg_restore_fl) | |||
148 | * return to userspace or wherever. Our solution to this is to surround the | 173 | * return to userspace or wherever. Our solution to this is to surround the |
149 | * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the | 174 | * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the |
150 | * Host that it is *never* to interrupt us there, even if interrupts seem to be | 175 | * Host that it is *never* to interrupt us there, even if interrupts seem to be |
151 | * enabled. */ | 176 | * enabled. |
177 | */ | ||
152 | ENTRY(lguest_iret) | 178 | ENTRY(lguest_iret) |
153 | pushl %eax | 179 | pushl %eax |
154 | movl 12(%esp), %eax | 180 | movl 12(%esp), %eax |
155 | lguest_noirq_start: | 181 | lguest_noirq_start: |
156 | /* Note the %ss: segment prefix here. Normal data accesses use the | 182 | /* |
183 | * Note the %ss: segment prefix here. Normal data accesses use the | ||
157 | * "ds" segment, but that will have already been restored for whatever | 184 | * "ds" segment, but that will have already been restored for whatever |
158 | * we're returning to (such as userspace): we can't trust it. The %ss: | 185 | * we're returning to (such as userspace): we can't trust it. The %ss: |
159 | * prefix makes sure we use the stack segment, which is still valid. */ | 186 | * prefix makes sure we use the stack segment, which is still valid. |
187 | */ | ||
160 | movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled | 188 | movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled |
161 | popl %eax | 189 | popl %eax |
162 | iret | 190 | iret |
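All three assembler entry points get wired into the paravirt tables during lguest_init(). Roughly, from boot.c of this era (a sketch from memory, not part of this hunk):

	/* Interrupt-related operations, plus our special iret. */
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
	pv_cpu_ops.iret = lguest_iret;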
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index a6974e9b8ebf..1e2cb846b3c9 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c | |||
@@ -1,6 +1,8 @@ | |||
1 | /*P:400 This contains run_guest() which actually calls into the Host<->Guest | 1 | /*P:400 |
2 | * This contains run_guest() which actually calls into the Host<->Guest | ||
2 | * Switcher and analyzes the return, such as determining if the Guest wants the | 3 | * Switcher and analyzes the return, such as determining if the Guest wants the |
3 | * Host to do something. This file also contains useful helper routines. :*/ | 4 | * Host to do something. This file also contains useful helper routines. |
5 | :*/ | ||
4 | #include <linux/module.h> | 6 | #include <linux/module.h> |
5 | #include <linux/stringify.h> | 7 | #include <linux/stringify.h> |
6 | #include <linux/stddef.h> | 8 | #include <linux/stddef.h> |
@@ -24,7 +26,8 @@ static struct page **switcher_page; | |||
24 | /* This One Big lock protects all inter-guest data structures. */ | 26 | /* This One Big lock protects all inter-guest data structures. */ |
25 | DEFINE_MUTEX(lguest_lock); | 27 | DEFINE_MUTEX(lguest_lock); |
26 | 28 | ||
27 | /*H:010 We need to set up the Switcher at a high virtual address. Remember the | 29 | /*H:010 |
30 | * We need to set up the Switcher at a high virtual address. Remember the | ||
28 | * Switcher is a few hundred bytes of assembler code which actually changes the | 31 | * Switcher is a few hundred bytes of assembler code which actually changes the |
29 | * CPU to run the Guest, and then changes back to the Host when a trap or | 32 | * CPU to run the Guest, and then changes back to the Host when a trap or |
30 | * interrupt happens. | 33 | * interrupt happens. |
@@ -33,7 +36,8 @@ DEFINE_MUTEX(lguest_lock); | |||
33 | * Host since it will be running as the switchover occurs. | 36 | * Host since it will be running as the switchover occurs. |
34 | * | 37 | * |
35 | * Trying to map memory at a particular address is an unusual thing to do, so | 38 | * Trying to map memory at a particular address is an unusual thing to do, so |
36 | * it's not a simple one-liner. */ | 39 | * it's not a simple one-liner. |
40 | */ | ||
37 | static __init int map_switcher(void) | 41 | static __init int map_switcher(void) |
38 | { | 42 | { |
39 | int i, err; | 43 | int i, err; |
@@ -47,8 +51,10 @@ static __init int map_switcher(void) | |||
47 | * easy. | 51 | * easy. |
48 | */ | 52 | */ |
49 | 53 | ||
50 | /* We allocate an array of struct page pointers. map_vm_area() wants | 54 | /* |
51 | * this, rather than just an array of pages. */ | 55 | * We allocate an array of struct page pointers. map_vm_area() wants |
56 | * this, rather than just an array of pages. | ||
57 | */ | ||
52 | switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES, | 58 | switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES, |
53 | GFP_KERNEL); | 59 | GFP_KERNEL); |
54 | if (!switcher_page) { | 60 | if (!switcher_page) { |
@@ -56,8 +62,10 @@ static __init int map_switcher(void) | |||
56 | goto out; | 62 | goto out; |
57 | } | 63 | } |
58 | 64 | ||
59 | /* Now we actually allocate the pages. The Guest will see these pages, | 65 | /* |
60 | * so we make sure they're zeroed. */ | 66 | * Now we actually allocate the pages. The Guest will see these pages, |
67 | * so we make sure they're zeroed. | ||
68 | */ | ||
61 | for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) { | 69 | for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) { |
62 | unsigned long addr = get_zeroed_page(GFP_KERNEL); | 70 | unsigned long addr = get_zeroed_page(GFP_KERNEL); |
63 | if (!addr) { | 71 | if (!addr) { |
@@ -67,19 +75,23 @@ static __init int map_switcher(void) | |||
67 | switcher_page[i] = virt_to_page(addr); | 75 | switcher_page[i] = virt_to_page(addr); |
68 | } | 76 | } |
69 | 77 | ||
70 | /* First we check that the Switcher won't overlap the fixmap area at | 78 | /* |
79 | * First we check that the Switcher won't overlap the fixmap area at | ||
71 | * the top of memory. It's currently nowhere near, but it could have | 80 | * the top of memory. It's currently nowhere near, but it could have |
72 | * very strange effects if it ever happened. */ | 81 | * very strange effects if it ever happened. |
82 | */ | ||
73 | if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){ | 83 | if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){ |
74 | err = -ENOMEM; | 84 | err = -ENOMEM; |
75 | printk("lguest: mapping switcher would thwack fixmap\n"); | 85 | printk("lguest: mapping switcher would thwack fixmap\n"); |
76 | goto free_pages; | 86 | goto free_pages; |
77 | } | 87 | } |
78 | 88 | ||
79 | /* Now we reserve the "virtual memory area" we want: 0xFFC00000 | 89 | /* |
90 | * Now we reserve the "virtual memory area" we want: 0xFFC00000 | ||
80 | * (SWITCHER_ADDR). We might not get it in theory, but in practice | 91 | * (SWITCHER_ADDR). We might not get it in theory, but in practice |
81 | * it's worked so far. The end address needs +1 because __get_vm_area | 92 | * it's worked so far. The end address needs +1 because __get_vm_area |
82 | * allocates an extra guard page, so we need space for that. */ | 93 | * allocates an extra guard page, so we need space for that. |
94 | */ | ||
83 | switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, | 95 | switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, |
84 | VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR | 96 | VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR |
85 | + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); | 97 | + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); |
@@ -89,11 +101,13 @@ static __init int map_switcher(void) | |||
89 | goto free_pages; | 101 | goto free_pages; |
90 | } | 102 | } |
91 | 103 | ||
92 | /* This code actually sets up the pages we've allocated to appear at | 104 | /* |
105 | * This code actually sets up the pages we've allocated to appear at | ||
93 | * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the | 106 | * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the |
94 | * kind of pages we're mapping (kernel pages), and a pointer to our | 107 | * kind of pages we're mapping (kernel pages), and a pointer to our |
95 | * array of struct pages. It increments that pointer, but we don't | 108 | * array of struct pages. It increments that pointer, but we don't |
96 | * care. */ | 109 | * care. |
110 | */ | ||
97 | pagep = switcher_page; | 111 | pagep = switcher_page; |
98 | err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); | 112 | err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); |
99 | if (err) { | 113 | if (err) { |
@@ -101,8 +115,10 @@ static __init int map_switcher(void) | |||
101 | goto free_vma; | 115 | goto free_vma; |
102 | } | 116 | } |
103 | 117 | ||
104 | /* Now the Switcher is mapped at the right address, we can't fail! | 118 | /* |
105 | * Copy in the compiled-in Switcher code (from <arch>_switcher.S). */ | 119 | * Now the Switcher is mapped at the right address, we can't fail! |
120 | * Copy in the compiled-in Switcher code (from <arch>_switcher.S). | ||
121 | */ | ||
106 | memcpy(switcher_vma->addr, start_switcher_text, | 122 | memcpy(switcher_vma->addr, start_switcher_text, |
107 | end_switcher_text - start_switcher_text); | 123 | end_switcher_text - start_switcher_text); |
108 | 124 | ||
@@ -124,8 +140,7 @@ out: | |||
124 | } | 140 | } |
125 | /*:*/ | 141 | /*:*/ |
126 | 142 | ||
127 | /* Cleaning up the mapping when the module is unloaded is almost... | 143 | /* Cleaning up the mapping when the module is unloaded is almost... too easy. */ |
128 | * too easy. */ | ||
129 | static void unmap_switcher(void) | 144 | static void unmap_switcher(void) |
130 | { | 145 | { |
131 | unsigned int i; | 146 | unsigned int i; |
@@ -151,16 +166,19 @@ static void unmap_switcher(void) | |||
151 | * But we can't trust the Guest: it might be trying to access the Launcher | 166 | * But we can't trust the Guest: it might be trying to access the Launcher |
152 | * code. We have to check that the range is below the pfn_limit the Launcher | 167 | * code. We have to check that the range is below the pfn_limit the Launcher |
153 | * gave us. We have to make sure that addr + len doesn't give us a false | 168 | * gave us. We have to make sure that addr + len doesn't give us a false |
154 | * positive by overflowing, too. */ | 169 | * positive by overflowing, too. |
170 | */ | ||
155 | bool lguest_address_ok(const struct lguest *lg, | 171 | bool lguest_address_ok(const struct lguest *lg, |
156 | unsigned long addr, unsigned long len) | 172 | unsigned long addr, unsigned long len) |
157 | { | 173 | { |
158 | return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); | 174 | return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); |
159 | } | 175 | } |
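The second test is the subtle one. A quick illustration of the wrap-around it guards against, with hypothetical numbers:

	/* On 32-bit, this (hypothetical) Guest request wraps: */
	unsigned long addr = 0xFFFFF000UL, len = 0x2000UL;

	/*
	 * addr + len overflows to 0x1000, so the pfn_limit test alone
	 * would pass for almost any limit; "addr+len >= addr" is false,
	 * so lguest_address_ok() correctly refuses the access.
	 * ("lg" is whichever Guest we're checking.)
	 */
	bool ok = lguest_address_ok(lg, addr, len);	/* == false */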
160 | 176 | ||
161 | /* This routine copies memory from the Guest. Here we can see how useful the | 177 | /* |
178 | * This routine copies memory from the Guest. Here we can see how useful the | ||
162 | * kill_guest() routine we met in the Launcher can be: we return a random | 179 | * kill_guest() routine we met in the Launcher can be: we return a random |
163 | * value (all zeroes) instead of needing to return an error. */ | 180 | * value (all zeroes) instead of needing to return an error. |
181 | */ | ||
164 | void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes) | 182 | void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes) |
165 | { | 183 | { |
166 | if (!lguest_address_ok(cpu->lg, addr, bytes) | 184 | if (!lguest_address_ok(cpu->lg, addr, bytes) |
@@ -181,9 +199,11 @@ void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b, | |||
181 | } | 199 | } |
182 | /*:*/ | 200 | /*:*/ |
183 | 201 | ||
184 | /*H:030 Let's jump straight to the main loop which runs the Guest. | 202 | /*H:030 |
203 | * Let's jump straight to the main loop which runs the Guest. | ||
185 | * Remember, this is called by the Launcher reading /dev/lguest, and we keep | 204 | * Remember, this is called by the Launcher reading /dev/lguest, and we keep |
186 | * going around and around until something interesting happens. */ | 205 | * going around and around until something interesting happens. |
206 | */ | ||
187 | int run_guest(struct lg_cpu *cpu, unsigned long __user *user) | 207 | int run_guest(struct lg_cpu *cpu, unsigned long __user *user) |
188 | { | 208 | { |
189 | /* We stop running once the Guest is dead. */ | 209 | /* We stop running once the Guest is dead. */ |
@@ -195,10 +215,17 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) | |||
195 | if (cpu->hcall) | 215 | if (cpu->hcall) |
196 | do_hypercalls(cpu); | 216 | do_hypercalls(cpu); |
197 | 217 | ||
198 | /* It's possible the Guest did a NOTIFY hypercall to the | 218 | /* |
199 | * Launcher, in which case we return from the read() now. */ | 219 | * It's possible the Guest did a NOTIFY hypercall to the |
220 | * Launcher. | ||
221 | */ | ||
200 | if (cpu->pending_notify) { | 222 | if (cpu->pending_notify) { |
223 | /* | ||
224 | * Does it just need to write to a registered | ||
225 | * eventfd (ie. the appropriate virtqueue thread)? | ||
226 | */ | ||
201 | if (!send_notify_to_eventfd(cpu)) { | 227 | if (!send_notify_to_eventfd(cpu)) { |
228 | /* OK, we tell the main Launcher. */ | ||
202 | if (put_user(cpu->pending_notify, user)) | 229 | if (put_user(cpu->pending_notify, user)) |
203 | return -EFAULT; | 230 | return -EFAULT; |
204 | return sizeof(cpu->pending_notify); | 231 | return sizeof(cpu->pending_notify); |
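On the far side of that read(), the Launcher's main loop looks roughly like this (a sketch in the shape of run_guest() in Documentation/lguest/lguest.c; error handling condensed):

	for (;;) {
		unsigned long notify_addr;
		int readval;

		/* Blocks here, running the Guest, until something happens. */
		readval = pread(lguest_fd, &notify_addr,
				sizeof(notify_addr), cpu_id);

		/* One unsigned long back means the Guest did LHCALL_NOTIFY. */
		if (readval == sizeof(notify_addr))
			handle_output(notify_addr);
		else if (errno != EINTR)
			err(1, "Running guest failed");
	}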
@@ -209,29 +236,39 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) | |||
209 | if (signal_pending(current)) | 236 | if (signal_pending(current)) |
210 | return -ERESTARTSYS; | 237 | return -ERESTARTSYS; |
211 | 238 | ||
212 | /* Check if there are any interrupts which can be delivered now: | 239 | /* |
240 | * Check if there are any interrupts which can be delivered now: | ||
213 | * if so, this sets up the handler to be executed when we next | 241 | * if so, this sets up the handler to be executed when we next |
214 | * run the Guest. */ | 242 | * run the Guest. |
243 | */ | ||
215 | irq = interrupt_pending(cpu, &more); | 244 | irq = interrupt_pending(cpu, &more); |
216 | if (irq < LGUEST_IRQS) | 245 | if (irq < LGUEST_IRQS) |
217 | try_deliver_interrupt(cpu, irq, more); | 246 | try_deliver_interrupt(cpu, irq, more); |
218 | 247 | ||
219 | /* All long-lived kernel loops need to check with this horrible | 248 | /* |
249 | * All long-lived kernel loops need to check with this horrible | ||
220 | * thing called the freezer. If the Host is trying to suspend, | 250 | * thing called the freezer. If the Host is trying to suspend, |
221 | * it stops us. */ | 251 | * it stops us. |
252 | */ | ||
222 | try_to_freeze(); | 253 | try_to_freeze(); |
223 | 254 | ||
224 | /* Just make absolutely sure the Guest is still alive. One of | 255 | /* |
225 | * those hypercalls could have been fatal, for example. */ | 256 | * Just make absolutely sure the Guest is still alive. One of |
257 | * those hypercalls could have been fatal, for example. | ||
258 | */ | ||
226 | if (cpu->lg->dead) | 259 | if (cpu->lg->dead) |
227 | break; | 260 | break; |
228 | 261 | ||
229 | /* If the Guest asked to be stopped, we sleep. The Guest's | 262 | /* |
230 | * clock timer will wake us. */ | 263 | * If the Guest asked to be stopped, we sleep. The Guest's |
264 | * clock timer will wake us. | ||
265 | */ | ||
231 | if (cpu->halted) { | 266 | if (cpu->halted) { |
232 | set_current_state(TASK_INTERRUPTIBLE); | 267 | set_current_state(TASK_INTERRUPTIBLE); |
233 | /* Just before we sleep, make sure no interrupt snuck in | 268 | /* |
234 | * which we should be delivering. */ | 269 | * Just before we sleep, make sure no interrupt snuck in |
270 | * which we should be delivering. | ||
271 | */ | ||
235 | if (interrupt_pending(cpu, &more) < LGUEST_IRQS) | 272 | if (interrupt_pending(cpu, &more) < LGUEST_IRQS) |
236 | set_current_state(TASK_RUNNING); | 273 | set_current_state(TASK_RUNNING); |
237 | else | 274 | else |
@@ -239,8 +276,10 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) | |||
239 | continue; | 276 | continue; |
240 | } | 277 | } |
241 | 278 | ||
242 | /* OK, now we're ready to jump into the Guest. First we put up | 279 | /* |
243 | * the "Do Not Disturb" sign: */ | 280 | * OK, now we're ready to jump into the Guest. First we put up |
281 | * the "Do Not Disturb" sign: | ||
282 | */ | ||
244 | local_irq_disable(); | 283 | local_irq_disable(); |
245 | 284 | ||
246 | /* Actually run the Guest until something happens. */ | 285 | /* Actually run the Guest until something happens. */ |
@@ -327,8 +366,10 @@ static void __exit fini(void) | |||
327 | } | 366 | } |
328 | /*:*/ | 367 | /*:*/ |
329 | 368 | ||
330 | /* The Host side of lguest can be a module. This is a nice way for people to | 369 | /* |
331 | * play with it. */ | 370 | * The Host side of lguest can be a module. This is a nice way for people to |
371 | * play with it. | ||
372 | */ | ||
332 | module_init(init); | 373 | module_init(init); |
333 | module_exit(fini); | 374 | module_exit(fini); |
334 | MODULE_LICENSE("GPL"); | 375 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index c29ffa19cb74..83511eb0923d 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c | |||
@@ -1,8 +1,10 @@ | |||
1 | /*P:500 Just as userspace programs request kernel operations through a system | 1 | /*P:500 |
2 | * Just as userspace programs request kernel operations through a system | ||
2 | * call, the Guest requests Host operations through a "hypercall". You might | 3 | * call, the Guest requests Host operations through a "hypercall". You might |
3 | * notice this nomenclature doesn't really follow any logic, but the name has | 4 | * notice this nomenclature doesn't really follow any logic, but the name has |
4 | * been around for long enough that we're stuck with it. As you'd expect, this | 5 | * been around for long enough that we're stuck with it. As you'd expect, this |
5 | * code is basically one big switch statement. :*/ | 6 | * code is basically one big switch statement. |
7 | :*/ | ||
6 | 8 | ||
7 | /* Copyright (C) 2006 Rusty Russell IBM Corporation | 9 | /* Copyright (C) 2006 Rusty Russell IBM Corporation |
8 | 10 | ||
@@ -28,30 +30,41 @@ | |||
28 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
29 | #include "lg.h" | 31 | #include "lg.h" |
30 | 32 | ||
31 | /*H:120 This is the core hypercall routine: where the Guest gets what it wants. | 33 | /*H:120 |
32 | * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both. */ | 34 | * This is the core hypercall routine: where the Guest gets what it wants. |
35 | * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both. | ||
36 | */ | ||
33 | static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) | 37 | static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) |
34 | { | 38 | { |
35 | switch (args->arg0) { | 39 | switch (args->arg0) { |
36 | case LHCALL_FLUSH_ASYNC: | 40 | case LHCALL_FLUSH_ASYNC: |
37 | /* This call does nothing, except by breaking out of the Guest | 41 | /* |
38 | * it makes us process all the asynchronous hypercalls. */ | 42 | * This call does nothing, except by breaking out of the Guest |
43 | * it makes us process all the asynchronous hypercalls. | ||
44 | */ | ||
39 | break; | 45 | break; |
40 | case LHCALL_SEND_INTERRUPTS: | 46 | case LHCALL_SEND_INTERRUPTS: |
41 | /* This call does nothing too, but by breaking out of the Guest | 47 | /* |
42 | * it makes us process any pending interrupts. */ | 48 | * This call does nothing too, but by breaking out of the Guest |
49 | * it makes us process any pending interrupts. | ||
50 | */ | ||
43 | break; | 51 | break; |
44 | case LHCALL_LGUEST_INIT: | 52 | case LHCALL_LGUEST_INIT: |
45 | /* You can't get here unless you're already initialized. Don't | 53 | /* |
46 | * do that. */ | 54 | * You can't get here unless you're already initialized. Don't |
55 | * do that. | ||
56 | */ | ||
47 | kill_guest(cpu, "already have lguest_data"); | 57 | kill_guest(cpu, "already have lguest_data"); |
48 | break; | 58 | break; |
49 | case LHCALL_SHUTDOWN: { | 59 | case LHCALL_SHUTDOWN: { |
50 | /* Shutdown is such a trivial hypercall that we do it in four | ||
51 | * lines right here. */ | ||
52 | char msg[128]; | 60 | char msg[128]; |
53 | /* If the lgread fails, it will call kill_guest() itself; the | 61 | /* |
54 | * kill_guest() with the message will be ignored. */ | 62 | * Shutdown is such a trivial hypercall that we do it in five |
63 | * lines right here. | ||
64 | * | ||
65 | * If the lgread fails, it will call kill_guest() itself; the | ||
66 | * kill_guest() with the message will be ignored. | ||
67 | */ | ||
55 | __lgread(cpu, msg, args->arg1, sizeof(msg)); | 68 | __lgread(cpu, msg, args->arg1, sizeof(msg)); |
56 | msg[sizeof(msg)-1] = '\0'; | 69 | msg[sizeof(msg)-1] = '\0'; |
57 | kill_guest(cpu, "CRASH: %s", msg); | 70 | kill_guest(cpu, "CRASH: %s", msg); |
@@ -60,16 +73,17 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) | |||
60 | break; | 73 | break; |
61 | } | 74 | } |
62 | case LHCALL_FLUSH_TLB: | 75 | case LHCALL_FLUSH_TLB: |
63 | /* FLUSH_TLB comes in two flavors, depending on the | 76 | /* FLUSH_TLB comes in two flavors, depending on the argument: */ |
64 | * argument: */ | ||
65 | if (args->arg1) | 77 | if (args->arg1) |
66 | guest_pagetable_clear_all(cpu); | 78 | guest_pagetable_clear_all(cpu); |
67 | else | 79 | else |
68 | guest_pagetable_flush_user(cpu); | 80 | guest_pagetable_flush_user(cpu); |
69 | break; | 81 | break; |
70 | 82 | ||
71 | /* All these calls simply pass the arguments through to the right | 83 | /* |
72 | * routines. */ | 84 | * All these calls simply pass the arguments through to the right |
85 | * routines. | ||
86 | */ | ||
73 | case LHCALL_NEW_PGTABLE: | 87 | case LHCALL_NEW_PGTABLE: |
74 | guest_new_pagetable(cpu, args->arg1); | 88 | guest_new_pagetable(cpu, args->arg1); |
75 | break; | 89 | break; |
@@ -112,15 +126,16 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) | |||
112 | kill_guest(cpu, "Bad hypercall %li\n", args->arg0); | 126 | kill_guest(cpu, "Bad hypercall %li\n", args->arg0); |
113 | } | 127 | } |
114 | } | 128 | } |
115 | /*:*/ | ||
116 | 129 | ||
117 | /*H:124 Asynchronous hypercalls are easy: we just look in the array in the | 130 | /*H:124 |
131 | * Asynchronous hypercalls are easy: we just look in the array in the | ||
118 | * Guest's "struct lguest_data" to see if any new ones are marked "ready". | 132 | * Guest's "struct lguest_data" to see if any new ones are marked "ready". |
119 | * | 133 | * |
120 | * We are careful to do these in order: obviously we respect the order the | 134 | * We are careful to do these in order: obviously we respect the order the |
121 | * Guest put them in the ring, but we also promise the Guest that they will | 135 | * Guest put them in the ring, but we also promise the Guest that they will |
122 | * happen before any normal hypercall (which is why we check this before | 136 | * happen before any normal hypercall (which is why we check this before |
123 | * checking for a normal hcall). */ | 137 | * checking for a normal hcall). |
138 | */ | ||
124 | static void do_async_hcalls(struct lg_cpu *cpu) | 139 | static void do_async_hcalls(struct lg_cpu *cpu) |
125 | { | 140 | { |
126 | unsigned int i; | 141 | unsigned int i; |
@@ -133,22 +148,28 @@ static void do_async_hcalls(struct lg_cpu *cpu) | |||
133 | /* We process "struct lguest_data"s hcalls[] ring once. */ | 148 | /* We process "struct lguest_data"s hcalls[] ring once. */ |
134 | for (i = 0; i < ARRAY_SIZE(st); i++) { | 149 | for (i = 0; i < ARRAY_SIZE(st); i++) { |
135 | struct hcall_args args; | 150 | struct hcall_args args; |
136 | /* We remember where we were up to from last time. This makes | 151 | /* |
152 | * We remember where we were up to from last time. This makes | ||
137 | * sure that the hypercalls are done in the order the Guest | 153 | * sure that the hypercalls are done in the order the Guest |
138 | * places them in the ring. */ | 154 | * places them in the ring. |
155 | */ | ||
139 | unsigned int n = cpu->next_hcall; | 156 | unsigned int n = cpu->next_hcall; |
140 | 157 | ||
141 | /* 0xFF means there's no call here (yet). */ | 158 | /* 0xFF means there's no call here (yet). */ |
142 | if (st[n] == 0xFF) | 159 | if (st[n] == 0xFF) |
143 | break; | 160 | break; |
144 | 161 | ||
145 | /* OK, we have a hypercall. Increment the "next_hcall" cursor, | 162 | /* |
146 | * and wrap back to 0 if we reach the end. */ | 163 | * OK, we have a hypercall. Increment the "next_hcall" cursor, |
164 | * and wrap back to 0 if we reach the end. | ||
165 | */ | ||
147 | if (++cpu->next_hcall == LHCALL_RING_SIZE) | 166 | if (++cpu->next_hcall == LHCALL_RING_SIZE) |
148 | cpu->next_hcall = 0; | 167 | cpu->next_hcall = 0; |
149 | 168 | ||
150 | /* Copy the hypercall arguments into a local copy of | 169 | /* |
151 | * the hcall_args struct. */ | 170 | * Copy the hypercall arguments into a local copy of the |
171 | * hcall_args struct. | ||
172 | */ | ||
152 | if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n], | 173 | if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n], |
153 | sizeof(struct hcall_args))) { | 174 | sizeof(struct hcall_args))) { |
154 | kill_guest(cpu, "Fetching async hypercalls"); | 175 | kill_guest(cpu, "Fetching async hypercalls"); |
@@ -164,19 +185,25 @@ static void do_async_hcalls(struct lg_cpu *cpu) | |||
164 | break; | 185 | break; |
165 | } | 186 | } |
166 | 187 | ||
167 | /* Stop doing hypercalls if they want to notify the Launcher: | 188 | /* |
168 | * it needs to service this first. */ | 189 | * Stop doing hypercalls if they want to notify the Launcher: |
190 | * it needs to service this first. | ||
191 | */ | ||
169 | if (cpu->pending_notify) | 192 | if (cpu->pending_notify) |
170 | break; | 193 | break; |
171 | } | 194 | } |
172 | } | 195 | } |
173 | 196 | ||
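The Guest-side producer of that ring is worth reading next to the consumer above. A condensed sketch of async_hcall() from boot.c of this era (details recalled, not quoted):

	static void async_hcall(unsigned long call, unsigned long arg1,
				unsigned long arg2, unsigned long arg3)
	{
		/* Note: This code assumes we're uniprocessor. */
		static unsigned int next_call;
		unsigned long flags;

		local_irq_save(flags);
		if (lguest_data.hcall_status[next_call] != 0xFF) {
			/* Table full, so do a normal hcall which flushes it. */
			kvm_hypercall3(call, arg1, arg2, arg3);
		} else {
			lguest_data.hcalls[next_call].arg0 = call;
			lguest_data.hcalls[next_call].arg1 = arg1;
			lguest_data.hcalls[next_call].arg2 = arg2;
			lguest_data.hcalls[next_call].arg3 = arg3;
			/* Arguments must be written before we mark it to go. */
			wmb();
			lguest_data.hcall_status[next_call] = 0;
			if (++next_call == LHCALL_RING_SIZE)
				next_call = 0;
		}
		local_irq_restore(flags);
	}

Note the mirror-image ordering: the Guest writes the arguments, barriers, then clears the status byte; the Host reads the status byte first and the arguments afterwards.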
174 | /* Last of all, we look at what happens first of all. The very first time the | 197 | /* |
175 | * Guest makes a hypercall, we end up here to set things up: */ | 198 | * Last of all, we look at what happens first of all. The very first time the |
199 | * Guest makes a hypercall, we end up here to set things up: | ||
200 | */ | ||
176 | static void initialize(struct lg_cpu *cpu) | 201 | static void initialize(struct lg_cpu *cpu) |
177 | { | 202 | { |
178 | /* You can't do anything until you're initialized. The Guest knows the | 203 | /* |
179 | * rules, so we're unforgiving here. */ | 204 | * You can't do anything until you're initialized. The Guest knows the |
205 | * rules, so we're unforgiving here. | ||
206 | */ | ||
180 | if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) { | 207 | if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) { |
181 | kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0); | 208 | kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0); |
182 | return; | 209 | return; |
@@ -185,32 +212,44 @@ static void initialize(struct lg_cpu *cpu) | |||
185 | if (lguest_arch_init_hypercalls(cpu)) | 212 | if (lguest_arch_init_hypercalls(cpu)) |
186 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); | 213 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); |
187 | 214 | ||
188 | /* The Guest tells us where we're not to deliver interrupts by putting | 215 | /* |
189 | * the range of addresses into "struct lguest_data". */ | 216 | * The Guest tells us where we're not to deliver interrupts by putting |
217 | * the range of addresses into "struct lguest_data". | ||
218 | */ | ||
190 | if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start) | 219 | if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start) |
191 | || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end)) | 220 | || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end)) |
192 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); | 221 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); |
193 | 222 | ||
194 | /* We write the current time into the Guest's data page once so it can | 223 | /* |
195 | * set its clock. */ | 224 | * We write the current time into the Guest's data page once so it can |
225 | * set its clock. | ||
226 | */ | ||
196 | write_timestamp(cpu); | 227 | write_timestamp(cpu); |
197 | 228 | ||
198 | /* page_tables.c will also do some setup. */ | 229 | /* page_tables.c will also do some setup. */ |
199 | page_table_guest_data_init(cpu); | 230 | page_table_guest_data_init(cpu); |
200 | 231 | ||
201 | /* This is the one case where the above accesses might have been the | 232 | /* |
233 | * This is the one case where the above accesses might have been the | ||
202 | * first write to a Guest page. This may have caused a copy-on-write | 234 | * first write to a Guest page. This may have caused a copy-on-write |
203 | * fault, but the old page might be (read-only) in the Guest | 235 | * fault, but the old page might be (read-only) in the Guest |
204 | * pagetable. */ | 236 | * pagetable. |
237 | */ | ||
205 | guest_pagetable_clear_all(cpu); | 238 | guest_pagetable_clear_all(cpu); |
206 | } | 239 | } |
207 | /*:*/ | 240 | /*:*/ |
208 | 241 | ||
209 | /*M:013 If a Guest reads from a page (so creates a mapping) that it has never | 242 | /*M:013 |
243 | * If a Guest reads from a page (so creates a mapping) that it has never | ||
210 | * written to, and then the Launcher writes to it (ie. the output of a virtual | 244 | * written to, and then the Launcher writes to it (ie. the output of a virtual |
211 | * device), the Guest will still see the old page. In practice, this never | 245 | * device), the Guest will still see the old page. In practice, this never |
212 | * happens: why would the Guest read a page which it has never written to? But | 246 | * happens: why would the Guest read a page which it has never written to? But |
213 | * a similar scenario might one day bite us, so it's worth mentioning. :*/ | 247 | * a similar scenario might one day bite us, so it's worth mentioning. |
248 | * | ||
249 | * Note that if we used a shared anonymous mapping in the Launcher instead of | ||
250 | * mapping /dev/zero private, we wouldn't worry about copy-on-write. And we | ||
251 | * need that to switch the Launcher to processes (away from threads) anyway. | ||
252 | :*/ | ||
214 | 253 | ||
215 | /*H:100 | 254 | /*H:100 |
216 | * Hypercalls | 255 | * Hypercalls |
@@ -229,17 +268,22 @@ void do_hypercalls(struct lg_cpu *cpu) | |||
229 | return; | 268 | return; |
230 | } | 269 | } |
231 | 270 | ||
232 | /* The Guest has initialized. | 271 | /* |
272 | * The Guest has initialized. | ||
233 | * | 273 | * |
234 | * Look in the hypercall ring for the async hypercalls: */ | 274 | * Look in the hypercall ring for the async hypercalls: |
275 | */ | ||
235 | do_async_hcalls(cpu); | 276 | do_async_hcalls(cpu); |
236 | 277 | ||
237 | /* If we stopped reading the hypercall ring because the Guest did a | 278 | /* |
279 | * If we stopped reading the hypercall ring because the Guest did a | ||
238 | * NOTIFY to the Launcher, we want to return now. Otherwise we do | 280 | * NOTIFY to the Launcher, we want to return now. Otherwise we do |
239 | * the hypercall. */ | 281 | * the hypercall. |
282 | */ | ||
240 | if (!cpu->pending_notify) { | 283 | if (!cpu->pending_notify) { |
241 | do_hcall(cpu, cpu->hcall); | 284 | do_hcall(cpu, cpu->hcall); |
242 | /* Tricky point: we reset the hcall pointer to mark the | 285 | /* |
286 | * Tricky point: we reset the hcall pointer to mark the | ||
243 | * hypercall as "done". We use the hcall pointer rather than | 287 | * hypercall as "done". We use the hcall pointer rather than |
244 | * the trap number to indicate a hypercall is pending. | 288 | * the trap number to indicate a hypercall is pending. |
245 | * Normally it doesn't matter: the Guest will run again and | 289 | * Normally it doesn't matter: the Guest will run again and |
@@ -248,13 +292,16 @@ void do_hypercalls(struct lg_cpu *cpu) | |||
248 | * However, if we are signalled or the Guest sends I/O to the | 292 | * However, if we are signalled or the Guest sends I/O to the |
249 | * Launcher, the run_guest() loop will exit without running the | 293 | * Launcher, the run_guest() loop will exit without running the |
250 | * Guest. When it comes back it would try to re-run the | 294 | * Guest. When it comes back it would try to re-run the |
251 | * hypercall. Finding that bug sucked. */ | 295 | * hypercall. Finding that bug sucked. |
296 | */ | ||
252 | cpu->hcall = NULL; | 297 | cpu->hcall = NULL; |
253 | } | 298 | } |
254 | } | 299 | } |
255 | 300 | ||
256 | /* This routine supplies the Guest with time: it's used for wallclock time at | 301 | /* |
257 | * initial boot and as a rough time source if the TSC isn't available. */ | 302 | * This routine supplies the Guest with time: it's used for wallclock time at |
303 | * initial boot and as a rough time source if the TSC isn't available. | ||
304 | */ | ||
258 | void write_timestamp(struct lg_cpu *cpu) | 305 | void write_timestamp(struct lg_cpu *cpu) |
259 | { | 306 | { |
260 | struct timespec now; | 307 | struct timespec now; |
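For completeness, the rest of the routine is only a few lines; it finishes roughly like this (a sketch from memory):

	ktime_get_real_ts(&now);
	if (copy_to_user(&cpu->lg->lguest_data->time,
			 &now, sizeof(struct timespec)))
		kill_guest(cpu, "Writing timestamp");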
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 0e9067b0d507..18648180db02 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c | |||
@@ -1,4 +1,5 @@ | |||
1 | /*P:800 Interrupts (traps) are complicated enough to earn their own file. | 1 | /*P:800 |
2 | * Interrupts (traps) are complicated enough to earn their own file. | ||
2 | * There are three classes of interrupts: | 3 | * There are three classes of interrupts: |
3 | * | 4 | * |
4 | * 1) Real hardware interrupts which occur while we're running the Guest, | 5 | * 1) Real hardware interrupts which occur while we're running the Guest, |
@@ -10,7 +11,8 @@ | |||
10 | * just like real hardware would deliver them. Traps from the Guest can be set | 11 | * just like real hardware would deliver them. Traps from the Guest can be set |
11 | * up to go directly back into the Guest, but sometimes the Host wants to see | 12 | * up to go directly back into the Guest, but sometimes the Host wants to see |
12 | * them first, so we also have a way of "reflecting" them into the Guest as if | 13 | * them first, so we also have a way of "reflecting" them into the Guest as if |
13 | * they had been delivered to it directly. :*/ | 14 | * they had been delivered to it directly. |
15 | :*/ | ||
14 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
15 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
16 | #include <linux/module.h> | 18 | #include <linux/module.h> |
@@ -26,8 +28,10 @@ static unsigned long idt_address(u32 lo, u32 hi) | |||
26 | return (lo & 0x0000FFFF) | (hi & 0xFFFF0000); | 28 | return (lo & 0x0000FFFF) | (hi & 0xFFFF0000); |
27 | } | 29 | } |
28 | 30 | ||
29 | /* The "type" of the interrupt handler is a 4 bit field: we only support a | 31 | /* |
30 | * couple of types. */ | 32 | * The "type" of the interrupt handler is a 4 bit field: we only support a |
33 | * couple of types. | ||
34 | */ | ||
31 | static int idt_type(u32 lo, u32 hi) | 35 | static int idt_type(u32 lo, u32 hi) |
32 | { | 36 | { |
33 | return (hi >> 8) & 0xF; | 37 | return (hi >> 8) & 0xF; |
@@ -39,8 +43,10 @@ static bool idt_present(u32 lo, u32 hi) | |||
39 | return (hi & 0x8000); | 43 | return (hi & 0x8000); |
40 | } | 44 | } |
41 | 45 | ||
42 | /* We need a helper to "push" a value onto the Guest's stack, since that's a | 46 | /* |
43 | * big part of what delivering an interrupt does. */ | 47 | * We need a helper to "push" a value onto the Guest's stack, since that's a |
48 | * big part of what delivering an interrupt does. | ||
49 | */ | ||
44 | static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) | 50 | static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) |
45 | { | 51 | { |
46 | /* Stack grows downwards: move the pointer, then write the value. */ | 52 | /* Stack grows downwards: move the pointer, then write the value. */ |
@@ -48,7 +54,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) | |||
48 | lgwrite(cpu, *gstack, u32, val); | 54 | lgwrite(cpu, *gstack, u32, val); |
49 | } | 55 | } |
50 | 56 | ||
51 | /*H:210 The set_guest_interrupt() routine actually delivers the interrupt or | 57 | /*H:210 |
58 | * The set_guest_interrupt() routine actually delivers the interrupt or | ||
52 | * trap. The mechanics of delivering traps and interrupts to the Guest are the | 59 | * trap. The mechanics of delivering traps and interrupts to the Guest are the |
53 | * same, except some traps have an "error code" which gets pushed onto the | 60 | * same, except some traps have an "error code" which gets pushed onto the |
54 | * stack as well: the caller tells us if this is one. | 61 | * stack as well: the caller tells us if this is one. |
@@ -59,7 +66,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) | |||
59 | * | 66 | * |
60 | * We set up the stack just like the CPU does for a real interrupt, so it's | 67 | * We set up the stack just like the CPU does for a real interrupt, so it's |
61 | * identical for the Guest (and the standard "iret" instruction will undo | 68 | * identical for the Guest (and the standard "iret" instruction will undo |
62 | * it). */ | 69 | * it). |
70 | */ | ||
63 | static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | 71 | static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, |
64 | bool has_err) | 72 | bool has_err) |
65 | { | 73 | { |
@@ -67,20 +75,26 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | |||
67 | u32 eflags, ss, irq_enable; | 75 | u32 eflags, ss, irq_enable; |
68 | unsigned long virtstack; | 76 | unsigned long virtstack; |
69 | 77 | ||
70 | /* There are two cases for interrupts: one where the Guest is already | 78 | /* |
79 | * There are two cases for interrupts: one where the Guest is already | ||
71 | * in the kernel, and a more complex one where the Guest is in | 80 | * in the kernel, and a more complex one where the Guest is in |
72 | * userspace. We check the privilege level to find out. */ | 81 | * userspace. We check the privilege level to find out. |
82 | */ | ||
73 | if ((cpu->regs->ss&0x3) != GUEST_PL) { | 83 | if ((cpu->regs->ss&0x3) != GUEST_PL) { |
74 | /* The Guest told us their kernel stack with the SET_STACK | 84 | /* |
75 | * hypercall: both the virtual address and the segment */ | 85 | * The Guest told us their kernel stack with the SET_STACK |
86 | * hypercall: both the virtual address and the segment. | ||
87 | */ | ||
76 | virtstack = cpu->esp1; | 88 | virtstack = cpu->esp1; |
77 | ss = cpu->ss1; | 89 | ss = cpu->ss1; |
78 | 90 | ||
79 | origstack = gstack = guest_pa(cpu, virtstack); | 91 | origstack = gstack = guest_pa(cpu, virtstack); |
80 | /* We push the old stack segment and pointer onto the new | 92 | /* |
93 | * We push the old stack segment and pointer onto the new | ||
81 | * stack: when the Guest does an "iret" back from the interrupt | 94 | * stack: when the Guest does an "iret" back from the interrupt |
82 | * handler the CPU will notice they're dropping privilege | 95 | * handler the CPU will notice they're dropping privilege |
83 | * levels and expect these here. */ | 96 | * levels and expect these here. |
97 | */ | ||
84 | push_guest_stack(cpu, &gstack, cpu->regs->ss); | 98 | push_guest_stack(cpu, &gstack, cpu->regs->ss); |
85 | push_guest_stack(cpu, &gstack, cpu->regs->esp); | 99 | push_guest_stack(cpu, &gstack, cpu->regs->esp); |
86 | } else { | 100 | } else { |
@@ -91,18 +105,22 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | |||
91 | origstack = gstack = guest_pa(cpu, virtstack); | 105 | origstack = gstack = guest_pa(cpu, virtstack); |
92 | } | 106 | } |
93 | 107 | ||
94 | /* Remember that we never let the Guest actually disable interrupts, so | 108 | /* |
109 | * Remember that we never let the Guest actually disable interrupts, so | ||
95 | * the "Interrupt Flag" bit is always set. We copy that bit from the | 110 | * the "Interrupt Flag" bit is always set. We copy that bit from the |
96 | * Guest's "irq_enabled" field into the eflags word: we saw the Guest | 111 | * Guest's "irq_enabled" field into the eflags word: we saw the Guest |
97 | * copy it back in "lguest_iret". */ | 112 | * copy it back in "lguest_iret". |
113 | */ | ||
98 | eflags = cpu->regs->eflags; | 114 | eflags = cpu->regs->eflags; |
99 | if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0 | 115 | if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0 |
100 | && !(irq_enable & X86_EFLAGS_IF)) | 116 | && !(irq_enable & X86_EFLAGS_IF)) |
101 | eflags &= ~X86_EFLAGS_IF; | 117 | eflags &= ~X86_EFLAGS_IF; |
102 | 118 | ||
103 | /* An interrupt is expected to push three things on the stack: the old | 119 | /* |
120 | * An interrupt is expected to push three things on the stack: the old | ||
104 | * "eflags" word, the old code segment, and the old instruction | 121 | * "eflags" word, the old code segment, and the old instruction |
105 | * pointer. */ | 122 | * pointer. |
123 | */ | ||
106 | push_guest_stack(cpu, &gstack, eflags); | 124 | push_guest_stack(cpu, &gstack, eflags); |
107 | push_guest_stack(cpu, &gstack, cpu->regs->cs); | 125 | push_guest_stack(cpu, &gstack, cpu->regs->cs); |
108 | push_guest_stack(cpu, &gstack, cpu->regs->eip); | 126 | push_guest_stack(cpu, &gstack, cpu->regs->eip); |
@@ -111,15 +129,19 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | |||
111 | if (has_err) | 129 | if (has_err) |
112 | push_guest_stack(cpu, &gstack, cpu->regs->errcode); | 130 | push_guest_stack(cpu, &gstack, cpu->regs->errcode); |
113 | 131 | ||
114 | /* Now we've pushed all the old state, we change the stack, the code | 132 | /* |
115 | * segment and the address to execute. */ | 133 | * Now we've pushed all the old state, we change the stack, the code |
134 | * segment and the address to execute. | ||
135 | */ | ||
116 | cpu->regs->ss = ss; | 136 | cpu->regs->ss = ss; |
117 | cpu->regs->esp = virtstack + (gstack - origstack); | 137 | cpu->regs->esp = virtstack + (gstack - origstack); |
118 | cpu->regs->cs = (__KERNEL_CS|GUEST_PL); | 138 | cpu->regs->cs = (__KERNEL_CS|GUEST_PL); |
119 | cpu->regs->eip = idt_address(lo, hi); | 139 | cpu->regs->eip = idt_address(lo, hi); |
120 | 140 | ||
121 | /* There are two kinds of interrupt handlers: 0xE is an "interrupt | 141 | /* |
122 | * gate" which expects interrupts to be disabled on entry. */ | 142 | * There are two kinds of interrupt handlers: 0xE is an "interrupt |
143 | * gate" which expects interrupts to be disabled on entry. | ||
144 | */ | ||
123 | if (idt_type(lo, hi) == 0xE) | 145 | if (idt_type(lo, hi) == 0xE) |
124 | if (put_user(0, &cpu->lg->lguest_data->irq_enabled)) | 146 | if (put_user(0, &cpu->lg->lguest_data->irq_enabled)) |
125 | kill_guest(cpu, "Disabling interrupts"); | 147 | kill_guest(cpu, "Disabling interrupts"); |
@@ -130,7 +152,8 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | |||
130 | * | 152 | * |
131 | * interrupt_pending() returns the first pending interrupt which isn't blocked | 153 | * interrupt_pending() returns the first pending interrupt which isn't blocked |
132 | * by the Guest. It is called before every entry to the Guest, and just before | 154 | * by the Guest. It is called before every entry to the Guest, and just before |
133 | * we go to sleep when the Guest has halted itself. */ | 155 | * we go to sleep when the Guest has halted itself. |
156 | */ | ||
134 | unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) | 157 | unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) |
135 | { | 158 | { |
136 | unsigned int irq; | 159 | unsigned int irq; |
@@ -140,8 +163,10 @@ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) | |||
140 | if (!cpu->lg->lguest_data) | 163 | if (!cpu->lg->lguest_data) |
141 | return LGUEST_IRQS; | 164 | return LGUEST_IRQS; |
142 | 165 | ||
143 | /* Take our "irqs_pending" array and remove any interrupts the Guest | 166 | /* |
144 | * wants blocked: the result ends up in "blk". */ | 167 | * Take our "irqs_pending" array and remove any interrupts the Guest |
168 | * wants blocked: the result ends up in "blk". | ||
169 | */ | ||
145 | if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, | 170 | if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, |
146 | sizeof(blk))) | 171 | sizeof(blk))) |
147 | return LGUEST_IRQS; | 172 | return LGUEST_IRQS; |
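The selection logic that follows this hunk boils down to bitmap arithmetic: mask the pending set with the complement of the blocked set, then take the lowest bit. A sketch, assuming the usual Linux bitmap helpers:

    /* blk = irqs_pending & ~blk: pending and not blocked. */
    bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);

    /* Deliver the lowest-numbered interrupt first... */
    irq = find_first_bit(blk, LGUEST_IRQS);
    /* ...and tell the caller whether another one is waiting behind it. */
    *more = (find_next_bit(blk, LGUEST_IRQS, irq + 1) < LGUEST_IRQS);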
@@ -154,16 +179,20 @@ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) | |||
154 | return irq; | 179 | return irq; |
155 | } | 180 | } |
156 | 181 | ||
157 | /* This actually diverts the Guest to running an interrupt handler, once an | 182 | /* |
158 | * interrupt has been identified by interrupt_pending(). */ | 183 | * This actually diverts the Guest to running an interrupt handler, once an |
184 | * interrupt has been identified by interrupt_pending(). | ||
185 | */ | ||
159 | void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) | 186 | void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) |
160 | { | 187 | { |
161 | struct desc_struct *idt; | 188 | struct desc_struct *idt; |
162 | 189 | ||
163 | BUG_ON(irq >= LGUEST_IRQS); | 190 | BUG_ON(irq >= LGUEST_IRQS); |
164 | 191 | ||
165 | /* They may be in the middle of an iret, where they asked us never to | 192 | /* |
166 | * deliver interrupts. */ | 193 | * They may be in the middle of an iret, where they asked us never to |
194 | * deliver interrupts. | ||
195 | */ | ||
167 | if (cpu->regs->eip >= cpu->lg->noirq_start && | 196 | if (cpu->regs->eip >= cpu->lg->noirq_start && |
168 | (cpu->regs->eip < cpu->lg->noirq_end)) | 197 | (cpu->regs->eip < cpu->lg->noirq_end)) |
169 | return; | 198 | return; |
@@ -187,29 +216,37 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) | |||
187 | } | 216 | } |
188 | } | 217 | } |
189 | 218 | ||
190 | /* Look at the IDT entry the Guest gave us for this interrupt. The | 219 | /* |
220 | * Look at the IDT entry the Guest gave us for this interrupt. The | ||
191 | * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip | 221 | * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip |
192 | * over them. */ | 222 | * over them. |
223 | */ | ||
193 | idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq]; | 224 | idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq]; |
194 | /* If they don't have a handler (yet?), we just ignore it */ | 225 | /* If they don't have a handler (yet?), we just ignore it */ |
195 | if (idt_present(idt->a, idt->b)) { | 226 | if (idt_present(idt->a, idt->b)) { |
196 | /* OK, mark it no longer pending and deliver it. */ | 227 | /* OK, mark it no longer pending and deliver it. */ |
197 | clear_bit(irq, cpu->irqs_pending); | 228 | clear_bit(irq, cpu->irqs_pending); |
198 | /* set_guest_interrupt() takes the interrupt descriptor and a | 229 | /* |
230 | * set_guest_interrupt() takes the interrupt descriptor and a | ||
199 | * flag to say whether this interrupt pushes an error code onto | 231 | * flag to say whether this interrupt pushes an error code onto |
200 | * the stack as well: virtual interrupts never do. */ | 232 | * the stack as well: virtual interrupts never do. |
233 | */ | ||
201 | set_guest_interrupt(cpu, idt->a, idt->b, false); | 234 | set_guest_interrupt(cpu, idt->a, idt->b, false); |
202 | } | 235 | } |
203 | 236 | ||
204 | /* Every time we deliver an interrupt, we update the timestamp in the | 237 | /* |
238 | * Every time we deliver an interrupt, we update the timestamp in the | ||
205 | * Guest's lguest_data struct. It would be better for the Guest if we | 239 | * Guest's lguest_data struct. It would be better for the Guest if we |
206 | * did this more often, but it can actually be quite slow: doing it | 240 | * did this more often, but it can actually be quite slow: doing it |
207 | * here is a compromise which means at least it gets updated every | 241 | * here is a compromise which means at least it gets updated every |
208 | * timer interrupt. */ | 242 | * timer interrupt. |
243 | */ | ||
209 | write_timestamp(cpu); | 244 | write_timestamp(cpu); |
210 | 245 | ||
211 | /* If there are no other interrupts we want to deliver, clear | 246 | /* |
212 | * the pending flag. */ | 247 | * If there are no other interrupts we want to deliver, clear |
248 | * the pending flag. | ||
249 | */ | ||
213 | if (!more) | 250 | if (!more) |
214 | put_user(0, &cpu->lg->lguest_data->irq_pending); | 251 | put_user(0, &cpu->lg->lguest_data->irq_pending); |
215 | } | 252 | } |
@@ -217,24 +254,29 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) | |||
217 | /* And this is the routine when we want to set an interrupt for the Guest. */ | 254 | /* And this is the routine when we want to set an interrupt for the Guest. */ |
218 | void set_interrupt(struct lg_cpu *cpu, unsigned int irq) | 255 | void set_interrupt(struct lg_cpu *cpu, unsigned int irq) |
219 | { | 256 | { |
220 | /* Next time the Guest runs, the core code will see if it can deliver | 257 | /* |
221 | * this interrupt. */ | 258 | * Next time the Guest runs, the core code will see if it can deliver |
259 | * this interrupt. | ||
260 | */ | ||
222 | set_bit(irq, cpu->irqs_pending); | 261 | set_bit(irq, cpu->irqs_pending); |
223 | 262 | ||
224 | /* Make sure it sees it; it might be asleep (eg. halted), or | 263 | /* |
225 | * running the Guest right now, in which case kick_process() | 264 | * Make sure it sees it; it might be asleep (eg. halted), or running |
226 | * will knock it out. */ | 265 | * the Guest right now, in which case kick_process() will knock it out. |
266 | */ | ||
227 | if (!wake_up_process(cpu->tsk)) | 267 | if (!wake_up_process(cpu->tsk)) |
228 | kick_process(cpu->tsk); | 268 | kick_process(cpu->tsk); |
229 | } | 269 | } |
230 | /*:*/ | 270 | /*:*/ |
231 | 271 | ||
232 | /* Linux uses trap 128 for system calls. Plan 9 uses 64, and Ron Minnich sent | 272 | /* |
273 | * Linux uses trap 128 for system calls. Plan 9 uses 64, and Ron Minnich sent | ||
233 | * me a patch, so we support that too. It'd be a big step for lguest if half | 274 | * me a patch, so we support that too. It'd be a big step for lguest if half |
234 | * the Plan 9 user base were to start using it. | 275 | * the Plan 9 user base were to start using it. |
235 | * | 276 | * |
236 | * Actually now I think of it, it's possible that Ron *is* half the Plan 9 | 277 | * Actually now I think of it, it's possible that Ron *is* half the Plan 9 |
237 | * user base. Oh well. */ | 278 | * user base. Oh well. |
279 | */ | ||
238 | static bool could_be_syscall(unsigned int num) | 280 | static bool could_be_syscall(unsigned int num) |
239 | { | 281 | { |
240 | /* Normal Linux SYSCALL_VECTOR or reserved vector? */ | 282 | /* Normal Linux SYSCALL_VECTOR or reserved vector? */ |
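The body of could_be_syscall() falls outside this hunk; it is just a comparison against the stock Linux vector and the alternative one discussed above (a sketch, assuming syscall_vector holds the Plan 9-style vector accepted at module load):

    return num == SYSCALL_VECTOR || num == syscall_vector;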
@@ -274,9 +316,11 @@ void free_interrupts(void) | |||
274 | clear_bit(syscall_vector, used_vectors); | 316 | clear_bit(syscall_vector, used_vectors); |
275 | } | 317 | } |
276 | 318 | ||
277 | /*H:220 Now we've got the routines to deliver interrupts, delivering traps like | 319 | /*H:220 |
320 | * Now we've got the routines to deliver interrupts, delivering traps like | ||
278 | * page fault is easy. The only trick is that Intel decided that some traps | 321 | * page fault is easy. The only trick is that Intel decided that some traps |
279 | * should have error codes: */ | 322 | * should have error codes: |
323 | */ | ||
280 | static bool has_err(unsigned int trap) | 324 | static bool has_err(unsigned int trap) |
281 | { | 325 | { |
282 | return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17); | 326 | return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17); |
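For readers without the Intel manual to hand, the numbers picked out by has_err() are:

    /*
     * 8  = double fault           10 = invalid TSS
     * 11 = segment not present    12 = stack fault
     * 13 = general protection     14 = page fault
     * 17 = alignment check
     * These traps push an error code on the stack; the rest don't.
     */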
@@ -285,13 +329,17 @@ static bool has_err(unsigned int trap) | |||
285 | /* deliver_trap() returns true if it could deliver the trap. */ | 329 | /* deliver_trap() returns true if it could deliver the trap. */ |
286 | bool deliver_trap(struct lg_cpu *cpu, unsigned int num) | 330 | bool deliver_trap(struct lg_cpu *cpu, unsigned int num) |
287 | { | 331 | { |
288 | /* Trap numbers are always 8 bit, but we set an impossible trap number | 332 | /* |
289 | * for traps inside the Switcher, so check that here. */ | 333 | * Trap numbers are always 8 bit, but we set an impossible trap number |
334 | * for traps inside the Switcher, so check that here. | ||
335 | */ | ||
290 | if (num >= ARRAY_SIZE(cpu->arch.idt)) | 336 | if (num >= ARRAY_SIZE(cpu->arch.idt)) |
291 | return false; | 337 | return false; |
292 | 338 | ||
293 | /* Early on the Guest hasn't set the IDT entries (or maybe it put a | 339 | /* |
294 | * bogus one in): if we fail here, the Guest will be killed. */ | 340 | * Early on the Guest hasn't set the IDT entries (or maybe it put a |
341 | * bogus one in): if we fail here, the Guest will be killed. | ||
342 | */ | ||
295 | if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b)) | 343 | if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b)) |
296 | return false; | 344 | return false; |
297 | set_guest_interrupt(cpu, cpu->arch.idt[num].a, | 345 | set_guest_interrupt(cpu, cpu->arch.idt[num].a, |
@@ -299,7 +347,8 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num) | |||
299 | return true; | 347 | return true; |
300 | } | 348 | } |
301 | 349 | ||
302 | /*H:250 Here's the hard part: returning to the Host every time a trap happens | 350 | /*H:250 |
351 | * Here's the hard part: returning to the Host every time a trap happens | ||
303 | * and then calling deliver_trap() and re-entering the Guest is slow. | 352 | * and then calling deliver_trap() and re-entering the Guest is slow. |
304 | * Particularly because Guest userspace system calls are traps (usually trap | 353 | * Particularly because Guest userspace system calls are traps (usually trap |
305 | * 128). | 354 | * 128). |
@@ -311,69 +360,87 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num) | |||
311 | * the other hypervisors would beat it up at lunchtime. | 360 | * the other hypervisors would beat it up at lunchtime. |
312 | * | 361 | * |
313 | * This routine indicates if a particular trap number could be delivered | 362 | * This routine indicates if a particular trap number could be delivered |
314 | * directly. */ | 363 | * directly. |
364 | */ | ||
315 | static bool direct_trap(unsigned int num) | 365 | static bool direct_trap(unsigned int num) |
316 | { | 366 | { |
317 | /* Hardware interrupts don't go to the Guest at all (except system | 367 | /* |
318 | * call). */ | 368 | * Hardware interrupts don't go to the Guest at all (except system |
369 | * call). | ||
370 | */ | ||
319 | if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num)) | 371 | if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num)) |
320 | return false; | 372 | return false; |
321 | 373 | ||
322 | /* The Host needs to see page faults (for shadow paging and to save the | 374 | /* |
375 | * The Host needs to see page faults (for shadow paging and to save the | ||
323 | * fault address), general protection faults (in/out emulation) and | 376 | * fault address), general protection faults (in/out emulation) and |
324 | * device not available (TS handling), invalid opcode fault (kvm hcall), | 377 | * device not available (TS handling), invalid opcode fault (kvm hcall), |
325 | * and of course, the hypercall trap. */ | 378 | * and of course, the hypercall trap. |
379 | */ | ||
326 | return num != 14 && num != 13 && num != 7 && | 380 | return num != 14 && num != 13 && num != 7 && |
327 | num != 6 && num != LGUEST_TRAP_ENTRY; | 381 | num != 6 && num != LGUEST_TRAP_ENTRY; |
328 | } | 382 | } |
329 | /*:*/ | 383 | /*:*/ |
330 | 384 | ||
331 | /*M:005 The Guest has the ability to turn its interrupt gates into trap gates, | 385 | /*M:005 |
386 | * The Guest has the ability to turn its interrupt gates into trap gates, | ||
332 | * if it is careful. The Host will let trap gates go directly to the | 387 | * if it is careful. The Host will let trap gates go directly to the |
333 | * Guest, but the Guest needs the interrupts atomically disabled for an | 388 | * Guest, but the Guest needs the interrupts atomically disabled for an |
334 | * interrupt gate. It can do this by pointing the trap gate at instructions | 389 | * interrupt gate. It can do this by pointing the trap gate at instructions |
335 | * within noirq_start and noirq_end, where it can safely disable interrupts. */ | 390 | * within noirq_start and noirq_end, where it can safely disable interrupts. |
391 | */ | ||
336 | 392 | ||
337 | /*M:006 The Guests do not use the sysenter (fast system call) instruction, | 393 | /*M:006 |
394 | * The Guests do not use the sysenter (fast system call) instruction, | ||
338 | * because it's hardcoded to enter privilege level 0 and so can't go direct. | 395 | * because it's hardcoded to enter privilege level 0 and so can't go direct. |
339 | * It's about twice as fast as the older "int 0x80" system call, so it might | 396 | * It's about twice as fast as the older "int 0x80" system call, so it might |
340 | * still be worthwhile to handle it in the Switcher and lcall down to the | 397 | * still be worthwhile to handle it in the Switcher and lcall down to the |
341 | * Guest. The sysenter semantics are hairy tho: search for that keyword in | 398 | * Guest. The sysenter semantics are hairy tho: search for that keyword in |
342 | * entry.S :*/ | 399 | * entry.S |
400 | :*/ | ||
343 | 401 | ||
344 | /*H:260 When we make traps go directly into the Guest, we need to make sure | 402 | /*H:260 |
403 | * When we make traps go directly into the Guest, we need to make sure | ||
345 | * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the | 404 | * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the |
346 | * CPU trying to deliver the trap will fault while trying to push the interrupt | 405 | * CPU trying to deliver the trap will fault while trying to push the interrupt |
347 | * words on the stack: this is called a double fault, and it forces us to kill | 406 | * words on the stack: this is called a double fault, and it forces us to kill |
348 | * the Guest. | 407 | * the Guest. |
349 | * | 408 | * |
350 | * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. */ | 409 | * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. |
410 | */ | ||
351 | void pin_stack_pages(struct lg_cpu *cpu) | 411 | void pin_stack_pages(struct lg_cpu *cpu) |
352 | { | 412 | { |
353 | unsigned int i; | 413 | unsigned int i; |
354 | 414 | ||
355 | /* Depending on the CONFIG_4KSTACKS option, the Guest can have one or | 415 | /* |
356 | * two pages of stack space. */ | 416 | * Depending on the CONFIG_4KSTACKS option, the Guest can have one or |
417 | * two pages of stack space. | ||
418 | */ | ||
357 | for (i = 0; i < cpu->lg->stack_pages; i++) | 419 | for (i = 0; i < cpu->lg->stack_pages; i++) |
358 | /* The stack grows *downwards*, so the address we're given is the | 420 | /* |
421 | * The stack grows *downwards*, so the address we're given is the | ||
359 | * start of the page after the kernel stack. Subtract one to | 422 | * start of the page after the kernel stack. Subtract one to |
360 | * get back onto the first stack page, and keep subtracting to | 423 | * get back onto the first stack page, and keep subtracting to |
361 | * get to the rest of the stack pages. */ | 424 | * get to the rest of the stack pages. |
425 | */ | ||
362 | pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE); | 426 | pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE); |
363 | } | 427 | } |
364 | 428 | ||
365 | /* Direct traps also mean that we need to know whenever the Guest wants to use | 429 | /* |
430 | * Direct traps also mean that we need to know whenever the Guest wants to use | ||
366 | * a different kernel stack, so we can change the IDT entries to use that | 431 | * a different kernel stack, so we can change the IDT entries to use that |
367 | * stack. The IDT entries expect a virtual address, so unlike most addresses | 432 | * stack. The IDT entries expect a virtual address, so unlike most addresses |
368 | * the Guest gives us, the "esp" (stack pointer) value here is virtual, not | 433 | * the Guest gives us, the "esp" (stack pointer) value here is virtual, not |
369 | * physical. | 434 | * physical. |
370 | * | 435 | * |
371 | * In Linux each process has its own kernel stack, so this happens a lot: we | 436 | * In Linux each process has its own kernel stack, so this happens a lot: we |
372 | * change stacks on each context switch. */ | 437 | * change stacks on each context switch. |
438 | */ | ||
373 | void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) | 439 | void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) |
374 | { | 440 | { |
375 | /* You are not allowed have a stack segment with privilege level 0: bad | 441 | /* |
376 | * Guest! */ | 442 | * You're not allowed a stack segment with privilege level 0: bad Guest! |
443 | */ | ||
377 | if ((seg & 0x3) != GUEST_PL) | 444 | if ((seg & 0x3) != GUEST_PL) |
378 | kill_guest(cpu, "bad stack segment %i", seg); | 445 | kill_guest(cpu, "bad stack segment %i", seg); |
379 | /* We only expect one or two stack pages. */ | 446 | /* We only expect one or two stack pages. */ |
@@ -387,11 +454,15 @@ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) | |||
387 | pin_stack_pages(cpu); | 454 | pin_stack_pages(cpu); |
388 | } | 455 | } |
389 | 456 | ||
390 | /* All this reference to mapping stacks leads us neatly into the other complex | 457 | /* |
391 | * part of the Host: page table handling. */ | 458 | * All this reference to mapping stacks leads us neatly into the other complex |
459 | * part of the Host: page table handling. | ||
460 | */ | ||
392 | 461 | ||
393 | /*H:235 This is the routine which actually checks the Guest's IDT entry and | 462 | /*H:235 |
394 | * transfers it into the entry in "struct lguest": */ | 463 | * This is the routine which actually checks the Guest's IDT entry and |
464 | * transfers it into the entry in "struct lguest": | ||
465 | */ | ||
395 | static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, | 466 | static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, |
396 | unsigned int num, u32 lo, u32 hi) | 467 | unsigned int num, u32 lo, u32 hi) |
397 | { | 468 | { |
@@ -407,30 +478,38 @@ static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, | |||
407 | if (type != 0xE && type != 0xF) | 478 | if (type != 0xE && type != 0xF) |
408 | kill_guest(cpu, "bad IDT type %i", type); | 479 | kill_guest(cpu, "bad IDT type %i", type); |
409 | 480 | ||
410 | /* We only copy the handler address, present bit, privilege level and | 481 | /* |
482 | * We only copy the handler address, present bit, privilege level and | ||
411 | * type. The privilege level controls where the trap can be triggered | 483 | * type. The privilege level controls where the trap can be triggered |
412 | * manually with an "int" instruction. This is usually GUEST_PL, | 484 | * manually with an "int" instruction. This is usually GUEST_PL, |
413 | * except for system calls which userspace can use. */ | 485 | * except for system calls which userspace can use. |
486 | */ | ||
414 | trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF); | 487 | trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF); |
415 | trap->b = (hi&0xFFFFEF00); | 488 | trap->b = (hi&0xFFFFEF00); |
416 | } | 489 | } |
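The two masks in set_trap() are doing real work. Spelled out against the standard x86 gate layout (a reading aid, not code from the patch):

    /*
     * trap->a: bits 31-16 = code segment selector (__KERNEL_CS|GUEST_PL),
     *          bits 15-0  = low half of the handler offset (from "lo").
     * trap->b: 0xFFFFEF00 keeps the high half of the offset (bits 31-16),
     *          the present bit (15), the DPL (14-13) and the type (11-8),
     *          and clears bit 12 (always 0 for a gate) plus the reserved
     *          low byte.
     */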
417 | 490 | ||
418 | /*H:230 While we're here, dealing with delivering traps and interrupts to the | 491 | /*H:230 |
492 | * While we're here, dealing with delivering traps and interrupts to the | ||
419 | * Guest, we might as well complete the picture: how the Guest tells us where | 493 | * Guest, we might as well complete the picture: how the Guest tells us where |
420 | * it wants them to go. This would be simple, except making traps fast | 494 | * it wants them to go. This would be simple, except making traps fast |
421 | * requires some tricks. | 495 | * requires some tricks. |
422 | * | 496 | * |
423 | * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the | 497 | * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the |
424 | * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */ | 498 | * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. |
499 | */ | ||
425 | void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) | 500 | void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) |
426 | { | 501 | { |
427 | /* Guest never handles: NMI, doublefault, spurious interrupt or | 502 | /* |
428 | * hypercall. We ignore when it tries to set them. */ | 503 | * Guest never handles: NMI, doublefault, spurious interrupt or |
504 | * hypercall. We ignore when it tries to set them. | ||
505 | */ | ||
429 | if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY) | 506 | if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY) |
430 | return; | 507 | return; |
431 | 508 | ||
432 | /* Mark the IDT as changed: next time the Guest runs we'll know we have | 509 | /* |
433 | * to copy this again. */ | 510 | * Mark the IDT as changed: next time the Guest runs we'll know we have |
511 | * to copy this again. | ||
512 | */ | ||
434 | cpu->changed |= CHANGED_IDT; | 513 | cpu->changed |= CHANGED_IDT; |
435 | 514 | ||
436 | /* Check that the Guest doesn't try to step outside the bounds. */ | 515 | /* Check that the Guest doesn't try to step outside the bounds. */ |
@@ -440,9 +519,11 @@ void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) | |||
440 | set_trap(cpu, &cpu->arch.idt[num], num, lo, hi); | 519 | set_trap(cpu, &cpu->arch.idt[num], num, lo, hi); |
441 | } | 520 | } |
442 | 521 | ||
443 | /* The default entry for each interrupt points into the Switcher routines which | 522 | /* |
523 | * The default entry for each interrupt points into the Switcher routines which | ||
444 | * simply return to the Host. The run_guest() loop will then call | 524 | * simply return to the Host. The run_guest() loop will then call |
445 | * deliver_trap() to bounce it back into the Guest. */ | 525 | * deliver_trap() to bounce it back into the Guest. |
526 | */ | ||
446 | static void default_idt_entry(struct desc_struct *idt, | 527 | static void default_idt_entry(struct desc_struct *idt, |
447 | int trap, | 528 | int trap, |
448 | const unsigned long handler, | 529 | const unsigned long handler, |
@@ -451,13 +532,17 @@ static void default_idt_entry(struct desc_struct *idt, | |||
451 | /* A present interrupt gate. */ | 532 | /* A present interrupt gate. */ |
452 | u32 flags = 0x8e00; | 533 | u32 flags = 0x8e00; |
453 | 534 | ||
454 | /* Set the privilege level on the entry for the hypercall: this allows | 535 | /* |
455 | * the Guest to use the "int" instruction to trigger it. */ | 536 | * Set the privilege level on the entry for the hypercall: this allows |
537 | * the Guest to use the "int" instruction to trigger it. | ||
538 | */ | ||
456 | if (trap == LGUEST_TRAP_ENTRY) | 539 | if (trap == LGUEST_TRAP_ENTRY) |
457 | flags |= (GUEST_PL << 13); | 540 | flags |= (GUEST_PL << 13); |
458 | else if (base) | 541 | else if (base) |
459 | /* Copy priv. level from what Guest asked for. This allows | 542 | /* |
460 | * debug (int 3) traps from Guest userspace, for example. */ | 543 | * Copy privilege level from what Guest asked for. This allows |
544 | * debug (int 3) traps from Guest userspace, for example. | ||
545 | */ | ||
461 | flags |= (base->b & 0x6000); | 546 | flags |= (base->b & 0x6000); |
462 | 547 | ||
463 | /* Now pack it into the IDT entry in its weird format. */ | 548 | /* Now pack it into the IDT entry in its weird format. */ |
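The magic 0x8e00 above decodes the same way: present bit (15) set, DPL 0 (bits 14-13), type 0xE (bits 11-8) -- a 32-bit interrupt gate. ORing in (GUEST_PL << 13) just raises the DPL so the Guest can trigger the entry itself.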
@@ -475,16 +560,20 @@ void setup_default_idt_entries(struct lguest_ro_state *state, | |||
475 | default_idt_entry(&state->guest_idt[i], i, def[i], NULL); | 560 | default_idt_entry(&state->guest_idt[i], i, def[i], NULL); |
476 | } | 561 | } |
477 | 562 | ||
478 | /*H:240 We don't use the IDT entries in the "struct lguest" directly, instead | 563 | /*H:240 |
564 | * We don't use the IDT entries in the "struct lguest" directly, instead | ||
479 | * we copy them into the IDT which we've set up for Guests on this CPU, just | 565 | * we copy them into the IDT which we've set up for Guests on this CPU, just |
480 | * before we run the Guest. This routine does that copy. */ | 566 | * before we run the Guest. This routine does that copy. |
567 | */ | ||
481 | void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, | 568 | void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, |
482 | const unsigned long *def) | 569 | const unsigned long *def) |
483 | { | 570 | { |
484 | unsigned int i; | 571 | unsigned int i; |
485 | 572 | ||
486 | /* We can simply copy the direct traps, otherwise we use the default | 573 | /* |
487 | * ones in the Switcher: they will return to the Host. */ | 574 | * We can simply copy the direct traps, otherwise we use the default |
575 | * ones in the Switcher: they will return to the Host. | ||
576 | */ | ||
488 | for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) { | 577 | for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) { |
489 | const struct desc_struct *gidt = &cpu->arch.idt[i]; | 578 | const struct desc_struct *gidt = &cpu->arch.idt[i]; |
490 | 579 | ||
@@ -492,14 +581,16 @@ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, | |||
492 | if (!direct_trap(i)) | 581 | if (!direct_trap(i)) |
493 | continue; | 582 | continue; |
494 | 583 | ||
495 | /* Only trap gates (type 15) can go direct to the Guest. | 584 | /* |
585 | * Only trap gates (type 15) can go direct to the Guest. | ||
496 | * Interrupt gates (type 14) disable interrupts as they are | 586 | * Interrupt gates (type 14) disable interrupts as they are |
497 | * entered, which we never let the Guest do. Not present | 587 | * entered, which we never let the Guest do. Not present |
498 | * entries (type 0x0) also can't go direct, of course. | 588 | * entries (type 0x0) also can't go direct, of course. |
499 | * | 589 | * |
500 | * If it can't go direct, we still need to copy the priv. level: | 590 | * If it can't go direct, we still need to copy the priv. level: |
501 | * they might want to give userspace access to a software | 591 | * they might want to give userspace access to a software |
502 | * interrupt. */ | 592 | * interrupt. |
593 | */ | ||
503 | if (idt_type(gidt->a, gidt->b) == 0xF) | 594 | if (idt_type(gidt->a, gidt->b) == 0xF) |
504 | idt[i] = *gidt; | 595 | idt[i] = *gidt; |
505 | else | 596 | else |
@@ -518,7 +609,8 @@ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, | |||
518 | * the next timer interrupt (in nanoseconds). We use the high-resolution timer | 609 | * the next timer interrupt (in nanoseconds). We use the high-resolution timer |
519 | * infrastructure to set a callback at that time. | 610 | * infrastructure to set a callback at that time. |
520 | * | 611 | * |
521 | * 0 means "turn off the clock". */ | 612 | * 0 means "turn off the clock". |
613 | */ | ||
522 | void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) | 614 | void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) |
523 | { | 615 | { |
524 | ktime_t expires; | 616 | ktime_t expires; |
@@ -529,9 +621,11 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) | |||
529 | return; | 621 | return; |
530 | } | 622 | } |
531 | 623 | ||
532 | /* We use wallclock time here, so the Guest might not be running for | 624 | /* |
625 | * We use wallclock time here, so the Guest might not be running for | ||
533 | * all the time between now and the timer interrupt it asked for. This | 626 | * all the time between now and the timer interrupt it asked for. This |
534 | * is almost always the right thing to do. */ | 627 | * is almost always the right thing to do. |
628 | */ | ||
535 | expires = ktime_add_ns(ktime_get_real(), delta); | 629 | expires = ktime_add_ns(ktime_get_real(), delta); |
536 | hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS); | 630 | hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS); |
537 | } | 631 | } |
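The other half of the clockevent story is the hrtimer callback that runs when the deadline expires; all it has to do is raise the Guest's timer interrupt. A minimal sketch (the function name here is illustrative):

    static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
    {
            struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);

            /* By convention, interrupt 0 is the Guest's timer. */
            set_interrupt(cpu, 0);
            return HRTIMER_NORESTART;
    }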
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 01c591923793..bc28745d05af 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h | |||
@@ -16,15 +16,13 @@ | |||
16 | void free_pagetables(void); | 16 | void free_pagetables(void); |
17 | int init_pagetables(struct page **switcher_page, unsigned int pages); | 17 | int init_pagetables(struct page **switcher_page, unsigned int pages); |
18 | 18 | ||
19 | struct pgdir | 19 | struct pgdir { |
20 | { | ||
21 | unsigned long gpgdir; | 20 | unsigned long gpgdir; |
22 | pgd_t *pgdir; | 21 | pgd_t *pgdir; |
23 | }; | 22 | }; |
24 | 23 | ||
25 | /* We have two pages shared with guests, per cpu. */ | 24 | /* We have two pages shared with guests, per cpu. */ |
26 | struct lguest_pages | 25 | struct lguest_pages { |
27 | { | ||
28 | /* This is the stack page mapped rw in guest */ | 26 | /* This is the stack page mapped rw in guest */ |
29 | char spare[PAGE_SIZE - sizeof(struct lguest_regs)]; | 27 | char spare[PAGE_SIZE - sizeof(struct lguest_regs)]; |
30 | struct lguest_regs regs; | 28 | struct lguest_regs regs; |
@@ -54,13 +52,13 @@ struct lg_cpu { | |||
54 | 52 | ||
55 | unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ | 53 | unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ |
56 | 54 | ||
57 | /* At the end of a page, shared-mapped over lguest_pages in the guest. */ | 55 | /* At the end of a page, shared-mapped over lguest_pages in the guest. */ |
58 | unsigned long regs_page; | 56 | unsigned long regs_page; |
59 | struct lguest_regs *regs; | 57 | struct lguest_regs *regs; |
60 | 58 | ||
61 | struct lguest_pages *last_pages; | 59 | struct lguest_pages *last_pages; |
62 | 60 | ||
63 | int cpu_pgd; /* which pgd this cpu is currently using */ | 61 | int cpu_pgd; /* Which pgd this cpu is currently using */ |
64 | 62 | ||
65 | /* If a hypercall was asked for, this points to the arguments. */ | 63 | /* If a hypercall was asked for, this points to the arguments. */ |
66 | struct hcall_args *hcall; | 64 | struct hcall_args *hcall; |
@@ -89,15 +87,17 @@ struct lg_eventfd_map { | |||
89 | }; | 87 | }; |
90 | 88 | ||
91 | /* The private info the thread maintains about the guest. */ | 89 | /* The private info the thread maintains about the guest. */ |
92 | struct lguest | 90 | struct lguest { |
93 | { | ||
94 | struct lguest_data __user *lguest_data; | 91 | struct lguest_data __user *lguest_data; |
95 | struct lg_cpu cpus[NR_CPUS]; | 92 | struct lg_cpu cpus[NR_CPUS]; |
96 | unsigned int nr_cpus; | 93 | unsigned int nr_cpus; |
97 | 94 | ||
98 | u32 pfn_limit; | 95 | u32 pfn_limit; |
99 | /* This provides the offset to the base of guest-physical | 96 | |
100 | * memory in the Launcher. */ | 97 | /* |
98 | * This provides the offset to the base of guest-physical memory in the | ||
99 | * Launcher. | ||
100 | */ | ||
101 | void __user *mem_base; | 101 | void __user *mem_base; |
102 | unsigned long kernel_address; | 102 | unsigned long kernel_address; |
103 | 103 | ||
@@ -122,11 +122,13 @@ bool lguest_address_ok(const struct lguest *lg, | |||
122 | void __lgread(struct lg_cpu *, void *, unsigned long, unsigned); | 122 | void __lgread(struct lg_cpu *, void *, unsigned long, unsigned); |
123 | void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); | 123 | void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); |
124 | 124 | ||
125 | /*H:035 Using memory-copy operations like that is usually inconvenient, so we | 125 | /*H:035 |
126 | * Using memory-copy operations like that is usually inconvenient, so we | ||
126 | * have the following helper macros which read and write a specific type (often | 127 | * have the following helper macros which read and write a specific type (often |
127 | * an unsigned long). | 128 | * an unsigned long). |
128 | * | 129 | * |
129 | * This reads into a variable of the given type then returns that. */ | 130 | * This reads into a variable of the given type then returns that. |
131 | */ | ||
130 | #define lgread(cpu, addr, type) \ | 132 | #define lgread(cpu, addr, type) \ |
131 | ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; }) | 133 | ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; }) |
132 | 134 | ||
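A typical use, to make the statement-expression trick concrete: this reads one unsigned long from a Guest address, or kills the Guest if the address is bad:

    unsigned long val = lgread(cpu, addr, unsigned long);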
@@ -140,9 +142,11 @@ void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); | |||
140 | 142 | ||
141 | int run_guest(struct lg_cpu *cpu, unsigned long __user *user); | 143 | int run_guest(struct lg_cpu *cpu, unsigned long __user *user); |
142 | 144 | ||
143 | /* Helper macros to obtain the first 12 or the last 20 bits, this is only the | 145 | /* |
146 | * Helper macros to obtain the first 12 or the last 20 bits, this is only the | ||
144 | * first step in the migration to the kernel types. pte_pfn is already defined | 147 | * first step in the migration to the kernel types. pte_pfn is already defined |
145 | * in the kernel. */ | 148 | * in the kernel. |
149 | */ | ||
146 | #define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) | 150 | #define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) |
147 | #define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) | 151 | #define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) |
148 | #define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK) | 152 | #define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK) |
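A quick worked example of those masks, assuming 4k pages (so PAGE_MASK is 0xFFFFF000): for a pgd value of 0x01234067, pgd_flags() yields 0x067 (present, writable, user, accessed, dirty) and pgd_pfn() yields 0x01234, the page frame number.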
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index e082cdac88b4..b6200bc39b58 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c | |||
@@ -1,10 +1,12 @@ | |||
1 | /*P:050 Lguest guests use a very simple method to describe devices. It's a | 1 | /*P:050 |
2 | * Lguest guests use a very simple method to describe devices. It's a | ||
2 | * series of device descriptors contained just above the top of normal Guest | 3 | * series of device descriptors contained just above the top of normal Guest |
3 | * memory. | 4 | * memory. |
4 | * | 5 | * |
5 | * We use the standard "virtio" device infrastructure, which provides us with a | 6 | * We use the standard "virtio" device infrastructure, which provides us with a |
6 | * console, a network and a block driver. Each one expects some configuration | 7 | * console, a network and a block driver. Each one expects some configuration |
7 | * information and a "virtqueue" or two to send and receive data. :*/ | 8 | * information and a "virtqueue" or two to send and receive data. |
9 | :*/ | ||
8 | #include <linux/init.h> | 10 | #include <linux/init.h> |
9 | #include <linux/bootmem.h> | 11 | #include <linux/bootmem.h> |
10 | #include <linux/lguest_launcher.h> | 12 | #include <linux/lguest_launcher.h> |
@@ -20,8 +22,10 @@ | |||
20 | /* The pointer to our (page) of device descriptions. */ | 22 | /* The pointer to our (page) of device descriptions. */ |
21 | static void *lguest_devices; | 23 | static void *lguest_devices; |
22 | 24 | ||
23 | /* For Guests, device memory can be used as normal memory, so we cast away the | 25 | /* |
24 | * __iomem to quieten sparse. */ | 26 | * For Guests, device memory can be used as normal memory, so we cast away the |
27 | * __iomem to quieten sparse. | ||
28 | */ | ||
25 | static inline void *lguest_map(unsigned long phys_addr, unsigned long pages) | 29 | static inline void *lguest_map(unsigned long phys_addr, unsigned long pages) |
26 | { | 30 | { |
27 | return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages); | 31 | return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages); |
@@ -32,8 +36,10 @@ static inline void lguest_unmap(void *addr) | |||
32 | iounmap((__force void __iomem *)addr); | 36 | iounmap((__force void __iomem *)addr); |
33 | } | 37 | } |
34 | 38 | ||
35 | /*D:100 Each lguest device is just a virtio device plus a pointer to its entry | 39 | /*D:100 |
36 | * in the lguest_devices page. */ | 40 | * Each lguest device is just a virtio device plus a pointer to its entry |
41 | * in the lguest_devices page. | ||
42 | */ | ||
37 | struct lguest_device { | 43 | struct lguest_device { |
38 | struct virtio_device vdev; | 44 | struct virtio_device vdev; |
39 | 45 | ||
@@ -41,9 +47,11 @@ struct lguest_device { | |||
41 | struct lguest_device_desc *desc; | 47 | struct lguest_device_desc *desc; |
42 | }; | 48 | }; |
43 | 49 | ||
44 | /* Since the virtio infrastructure hands us a pointer to the virtio_device all | 50 | /* |
51 | * Since the virtio infrastructure hands us a pointer to the virtio_device all | ||
45 | * the time, it helps to have a curt macro to get a pointer to the struct | 52 | * the time, it helps to have a curt macro to get a pointer to the struct |
46 | * lguest_device it's enclosed in. */ | 53 | * lguest_device it's enclosed in. |
54 | */ | ||
47 | #define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) | 55 | #define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) |
48 | 56 | ||
49 | /*D:130 | 57 | /*D:130 |
@@ -55,7 +63,8 @@ struct lguest_device { | |||
55 | * the driver will look at them during setup. | 63 | * the driver will look at them during setup. |
56 | * | 64 | * |
57 | * A convenient routine to return the device's virtqueue config array: | 65 | * A convenient routine to return the device's virtqueue config array: |
58 | * immediately after the descriptor. */ | 66 | * immediately after the descriptor. |
67 | */ | ||
59 | static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc) | 68 | static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc) |
60 | { | 69 | { |
61 | return (void *)(desc + 1); | 70 | return (void *)(desc + 1); |
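The same pointer arithmetic carries on through the rest of the descriptor's trailing data. A sketch of the companion helpers, assuming the layout is the vqconfig array, then two feature bitmaps, then the config space:

    /* The feature bitmaps follow the virtqueue configs... */
    static u8 *lg_features(const struct lguest_device_desc *desc)
    {
            return (void *)(lg_vq(desc) + desc->num_vq);
    }

    /* ...and the config space follows the two feature bitmaps. */
    static u8 *lg_config(const struct lguest_device_desc *desc)
    {
            return lg_features(desc) + desc->feature_len * 2;
    }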
@@ -98,10 +107,12 @@ static u32 lg_get_features(struct virtio_device *vdev) | |||
98 | return features; | 107 | return features; |
99 | } | 108 | } |
100 | 109 | ||
101 | /* The virtio core takes the features the Host offers, and copies the | 110 | /* |
102 | * ones supported by the driver into the vdev->features array. Once | 111 | * The virtio core takes the features the Host offers, and copies the ones |
103 | * that's all sorted out, this routine is called so we can tell the | 112 | * supported by the driver into the vdev->features array. Once that's all |
104 | * Host which features we understand and accept. */ | 113 | * sorted out, this routine is called so we can tell the Host which features we |
114 | * understand and accept. | ||
115 | */ | ||
105 | static void lg_finalize_features(struct virtio_device *vdev) | 116 | static void lg_finalize_features(struct virtio_device *vdev) |
106 | { | 117 | { |
107 | unsigned int i, bits; | 118 | unsigned int i, bits; |
@@ -112,10 +123,11 @@ static void lg_finalize_features(struct virtio_device *vdev) | |||
112 | /* Give virtio_ring a chance to accept features. */ | 123 | /* Give virtio_ring a chance to accept features. */ |
113 | vring_transport_features(vdev); | 124 | vring_transport_features(vdev); |
114 | 125 | ||
115 | /* The vdev->features array is a Linux bitmask: this isn't the | 126 | /* |
116 | * same as the simple array of bits used by lguest devices | 127 | * The vdev->features array is a Linux bitmask: this isn't the same as |
117 | * for features. So we do this slow, manual conversion which is | 128 | * the simple array of bits used by lguest devices for features. So we |
118 | * completely general. */ | 129 | * do this slow, manual conversion which is completely general. |
130 | */ | ||
119 | memset(out_features, 0, desc->feature_len); | 131 | memset(out_features, 0, desc->feature_len); |
120 | bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; | 132 | bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; |
121 | for (i = 0; i < bits; i++) { | 133 | for (i = 0; i < bits; i++) { |
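The loop body is cut off by the hunk; the conversion it performs is plain bit-by-bit repacking, along these lines:

    if (test_bit(i, vdev->features))
            out_features[i / 8] |= (1 << (i % 8));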
@@ -146,15 +158,19 @@ static void lg_set(struct virtio_device *vdev, unsigned int offset, | |||
146 | memcpy(lg_config(desc) + offset, buf, len); | 158 | memcpy(lg_config(desc) + offset, buf, len); |
147 | } | 159 | } |
148 | 160 | ||
149 | /* The operations to get and set the status word just access the status field | 161 | /* |
150 | * of the device descriptor. */ | 162 | * The operations to get and set the status word just access the status field |
163 | * of the device descriptor. | ||
164 | */ | ||
151 | static u8 lg_get_status(struct virtio_device *vdev) | 165 | static u8 lg_get_status(struct virtio_device *vdev) |
152 | { | 166 | { |
153 | return to_lgdev(vdev)->desc->status; | 167 | return to_lgdev(vdev)->desc->status; |
154 | } | 168 | } |
155 | 169 | ||
156 | /* To notify on status updates, we (ab)use the NOTIFY hypercall, with the | 170 | /* |
157 | * descriptor address of the device. A zero status means "reset". */ | 171 | * To notify on status updates, we (ab)use the NOTIFY hypercall, with the |
172 | * descriptor address of the device. A zero status means "reset". | ||
173 | */ | ||
158 | static void set_status(struct virtio_device *vdev, u8 status) | 174 | static void set_status(struct virtio_device *vdev, u8 status) |
159 | { | 175 | { |
160 | unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; | 176 | unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; |
@@ -191,8 +207,7 @@ static void lg_reset(struct virtio_device *vdev) | |||
191 | */ | 207 | */ |
192 | 208 | ||
193 | /*D:140 This is the information we remember about each virtqueue. */ | 209 | /*D:140 This is the information we remember about each virtqueue. */ |
194 | struct lguest_vq_info | 210 | struct lguest_vq_info { |
195 | { | ||
196 | /* A copy of the information contained in the device config. */ | 211 | /* A copy of the information contained in the device config. */ |
197 | struct lguest_vqconfig config; | 212 | struct lguest_vqconfig config; |
198 | 213 | ||
@@ -200,13 +215,17 @@ struct lguest_vq_info | |||
200 | void *pages; | 215 | void *pages; |
201 | }; | 216 | }; |
202 | 217 | ||
203 | /* When the virtio_ring code wants to prod the Host, it calls us here and we | 218 | /* |
219 | * When the virtio_ring code wants to prod the Host, it calls us here and we | ||
204 | * make a hypercall. We hand the physical address of the virtqueue so the Host | 220 | * make a hypercall. We hand the physical address of the virtqueue so the Host |
205 | * knows which virtqueue we're talking about. */ | 221 | * knows which virtqueue we're talking about. |
222 | */ | ||
206 | static void lg_notify(struct virtqueue *vq) | 223 | static void lg_notify(struct virtqueue *vq) |
207 | { | 224 | { |
208 | /* We store our virtqueue information in the "priv" pointer of the | 225 | /* |
209 | * virtqueue structure. */ | 226 | * We store our virtqueue information in the "priv" pointer of the |
227 | * virtqueue structure. | ||
228 | */ | ||
210 | struct lguest_vq_info *lvq = vq->priv; | 229 | struct lguest_vq_info *lvq = vq->priv; |
211 | 230 | ||
212 | kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); | 231 | kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); |
@@ -215,7 +234,8 @@ static void lg_notify(struct virtqueue *vq) | |||
215 | /* An extern declaration inside a C file is bad form. Don't do it. */ | 234 | /* An extern declaration inside a C file is bad form. Don't do it. */ |
216 | extern void lguest_setup_irq(unsigned int irq); | 235 | extern void lguest_setup_irq(unsigned int irq); |
217 | 236 | ||
218 | /* This routine finds the first virtqueue described in the configuration of | 237 | /* |
238 | * This routine finds the Nth virtqueue described in the configuration of | ||
219 | * this device and sets it up. | 239 | * this device and sets it up. |
220 | * | 240 | * |
221 | * This is kind of an ugly duckling. It'd be nicer to have a standard | 241 | * This is kind of an ugly duckling. It'd be nicer to have a standard |
@@ -223,9 +243,7 @@ extern void lguest_setup_irq(unsigned int irq); | |||
223 | * everyone wants to do it differently. The KVM coders want the Guest to | 243 | * everyone wants to do it differently. The KVM coders want the Guest to |
224 | * allocate its own pages and tell the Host where they are, but for lguest it's | 244 | * allocate its own pages and tell the Host where they are, but for lguest it's |
225 | * simpler for the Host to simply tell us where the pages are. | 245 | * simpler for the Host to simply tell us where the pages are. |
226 | * | 246 | */ |
227 | * So we provide drivers with a "find the Nth virtqueue and set it up" | ||
228 | * function. */ | ||
229 | static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | 247 | static struct virtqueue *lg_find_vq(struct virtio_device *vdev, |
230 | unsigned index, | 248 | unsigned index, |
231 | void (*callback)(struct virtqueue *vq), | 249 | void (*callback)(struct virtqueue *vq), |
@@ -244,9 +262,11 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
244 | if (!lvq) | 262 | if (!lvq) |
245 | return ERR_PTR(-ENOMEM); | 263 | return ERR_PTR(-ENOMEM); |
246 | 264 | ||
247 | /* Make a copy of the "struct lguest_vqconfig" entry, which sits after | 265 | /* |
266 | * Make a copy of the "struct lguest_vqconfig" entry, which sits after | ||
248 | * the descriptor. We need a copy because the config space might not | 267 | * the descriptor. We need a copy because the config space might not |
249 | * be aligned correctly. */ | 268 | * be aligned correctly. |
269 | */ | ||
250 | memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config)); | 270 | memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config)); |
251 | 271 | ||
252 | printk("Mapping virtqueue %i addr %lx\n", index, | 272 | printk("Mapping virtqueue %i addr %lx\n", index, |
@@ -261,8 +281,10 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
261 | goto free_lvq; | 281 | goto free_lvq; |
262 | } | 282 | } |
263 | 283 | ||
264 | /* OK, tell virtio_ring.c to set up a virtqueue now we know its size | 284 | /* |
265 | * and we've got a pointer to its pages. */ | 285 | * OK, tell virtio_ring.c to set up a virtqueue now we know its size |
286 | * and we've got a pointer to its pages. | ||
287 | */ | ||
266 | vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, | 288 | vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, |
267 | vdev, lvq->pages, lg_notify, callback, name); | 289 | vdev, lvq->pages, lg_notify, callback, name); |
268 | if (!vq) { | 290 | if (!vq) { |
@@ -273,18 +295,23 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
273 | /* Make sure the interrupt is allocated. */ | 295 | /* Make sure the interrupt is allocated. */ |
274 | lguest_setup_irq(lvq->config.irq); | 296 | lguest_setup_irq(lvq->config.irq); |
275 | 297 | ||
276 | /* Tell the interrupt for this virtqueue to go to the virtio_ring | 298 | /* |
277 | * interrupt handler. */ | 299 | * Tell the interrupt for this virtqueue to go to the virtio_ring |
278 | /* FIXME: We used to have a flag for the Host to tell us we could use | 300 | * interrupt handler. |
301 | * | ||
302 | * FIXME: We used to have a flag for the Host to tell us we could use | ||
279 | * the interrupt as a source of randomness: it'd be nice to have that | 303 | * the interrupt as a source of randomness: it'd be nice to have that |
280 | * back.. */ | 304 | * back. |
305 | */ | ||
281 | err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, | 306 | err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, |
282 | dev_name(&vdev->dev), vq); | 307 | dev_name(&vdev->dev), vq); |
283 | if (err) | 308 | if (err) |
284 | goto destroy_vring; | 309 | goto destroy_vring; |
285 | 310 | ||
286 | /* Last of all we hook up our "struct lguest_vq_info" to the | 311 | /* |
287 | * virtqueue's priv pointer. */ | 312 | * Last of all we hook up our "struct lguest_vq_info" to the |
313 | * virtqueue's priv pointer. | ||
314 | */ | ||
288 | vq->priv = lvq; | 315 | vq->priv = lvq; |
289 | return vq; | 316 | return vq; |
290 | 317 | ||
@@ -358,11 +385,14 @@ static struct virtio_config_ops lguest_config_ops = { | |||
358 | .del_vqs = lg_del_vqs, | 385 | .del_vqs = lg_del_vqs, |
359 | }; | 386 | }; |
360 | 387 | ||
361 | /* The root device for the lguest virtio devices. This makes them appear as | 388 | /* |
362 | * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. */ | 389 | * The root device for the lguest virtio devices. This makes them appear as |
390 | * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. | ||
391 | */ | ||
363 | static struct device *lguest_root; | 392 | static struct device *lguest_root; |
364 | 393 | ||
365 | /*D:120 This is the core of the lguest bus: actually adding a new device. | 394 | /*D:120 |
395 | * This is the core of the lguest bus: actually adding a new device. | ||
366 | * It's a separate function because it's neater that way, and because an | 396 | * It's a separate function because it's neater that way, and because an |
367 | * earlier version of the code supported hotplug and unplug. They were removed | 397 | * earlier version of the code supported hotplug and unplug. They were removed |
368 | * early on because they were never used. | 398 | * early on because they were never used. |
@@ -371,14 +401,14 @@ static struct device *lguest_root; | |||
371 | * | 401 | * |
372 | * It's worth reading this carefully: we start with a pointer to the new device | 402 | * It's worth reading this carefully: we start with a pointer to the new device |
373 | * descriptor in the "lguest_devices" page, and the offset into the device | 403 | * descriptor in the "lguest_devices" page, and the offset into the device |
374 | * descriptor page so we can uniquely identify it if things go badly wrong. */ | 404 | * descriptor page so we can uniquely identify it if things go badly wrong. |
405 | */ | ||
375 | static void add_lguest_device(struct lguest_device_desc *d, | 406 | static void add_lguest_device(struct lguest_device_desc *d, |
376 | unsigned int offset) | 407 | unsigned int offset) |
377 | { | 408 | { |
378 | struct lguest_device *ldev; | 409 | struct lguest_device *ldev; |
379 | 410 | ||
380 | /* Start with zeroed memory; Linux's device layer seems to count on | 411 | /* Start with zeroed memory; Linux's device layer counts on it. */ |
381 | * it. */ | ||
382 | ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); | 412 | ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); |
383 | if (!ldev) { | 413 | if (!ldev) { |
384 | printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n", | 414 | printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n", |
@@ -388,17 +418,25 @@ static void add_lguest_device(struct lguest_device_desc *d, | |||
388 | 418 | ||
389 | /* This device's parent is the lguest/ dir. */ | 419 | /* This device's parent is the lguest/ dir. */ |
390 | ldev->vdev.dev.parent = lguest_root; | 420 | ldev->vdev.dev.parent = lguest_root; |
391 | /* We have a unique device index thanks to the dev_index counter. */ | 421 | /* |
422 | * The device type comes straight from the descriptor. There's also a | ||
423 | * device vendor field in the virtio_device struct, which we leave as | ||
424 | * 0. | ||
425 | */ | ||
392 | ldev->vdev.id.device = d->type; | 426 | ldev->vdev.id.device = d->type; |
393 | /* We have a simple set of routines for querying the device's | 427 | /* |
394 | * configuration information and setting its status. */ | 428 | * We have a simple set of routines for querying the device's |
429 | * configuration information and setting its status. | ||
430 | */ | ||
395 | ldev->vdev.config = &lguest_config_ops; | 431 | ldev->vdev.config = &lguest_config_ops; |
396 | /* And we remember the device's descriptor for lguest_config_ops. */ | 432 | /* And we remember the device's descriptor for lguest_config_ops. */ |
397 | ldev->desc = d; | 433 | ldev->desc = d; |
398 | 434 | ||
399 | /* register_virtio_device() sets up the generic fields for the struct | 435 | /* |
436 | * register_virtio_device() sets up the generic fields for the struct | ||
400 | * virtio_device and calls device_register(). This makes the bus | 437 | * virtio_device and calls device_register(). This makes the bus |
401 | * infrastructure look for a matching driver. */ | 438 | * infrastructure look for a matching driver. |
439 | */ | ||
402 | if (register_virtio_device(&ldev->vdev) != 0) { | 440 | if (register_virtio_device(&ldev->vdev) != 0) { |
403 | printk(KERN_ERR "Failed to register lguest dev %u type %u\n", | 441 | printk(KERN_ERR "Failed to register lguest dev %u type %u\n", |
404 | offset, d->type); | 442 | offset, d->type); |
@@ -406,8 +444,10 @@ static void add_lguest_device(struct lguest_device_desc *d, | |||
406 | } | 444 | } |
407 | } | 445 | } |
408 | 446 | ||
409 | /*D:110 scan_devices() simply iterates through the device page. The type 0 is | 447 | /*D:110 |
410 | * reserved to mean "end of devices". */ | 448 | * scan_devices() simply iterates through the device page. The type 0 is |
449 | * reserved to mean "end of devices". | ||
450 | */ | ||
411 | static void scan_devices(void) | 451 | static void scan_devices(void) |
412 | { | 452 | { |
413 | unsigned int i; | 453 | unsigned int i; |
@@ -426,7 +466,8 @@ static void scan_devices(void) | |||
426 | } | 466 | } |
427 | } | 467 | } |
428 | 468 | ||
429 | /*D:105 Fairly early in boot, lguest_devices_init() is called to set up the | 469 | /*D:105 |
470 | * Fairly early in boot, lguest_devices_init() is called to set up the | ||
430 | * lguest device infrastructure. We check that we are a Guest by checking | 471 | * lguest device infrastructure. We check that we are a Guest by checking |
431 | * pv_info.name: there are other ways of checking, but this seems most | 472 | * pv_info.name: there are other ways of checking, but this seems most |
432 | * obvious to me. | 473 | * obvious to me. |
@@ -437,7 +478,8 @@ static void scan_devices(void) | |||
437 | * correct sysfs incantation). | 478 | * correct sysfs incantation). |
438 | * | 479 | * |
439 | * Finally we call scan_devices() which adds all the devices found in the | 480 | * Finally we call scan_devices() which adds all the devices found in the |
440 | * lguest_devices page. */ | 481 | * lguest_devices page. |
482 | */ | ||
441 | static int __init lguest_devices_init(void) | 483 | static int __init lguest_devices_init(void) |
442 | { | 484 | { |
443 | if (strcmp(pv_info.name, "lguest") != 0) | 485 | if (strcmp(pv_info.name, "lguest") != 0) |
@@ -456,11 +498,13 @@ static int __init lguest_devices_init(void) | |||
456 | /* We do this after core stuff, but before the drivers. */ | 498 | /* We do this after core stuff, but before the drivers. */ |
457 | postcore_initcall(lguest_devices_init); | 499 | postcore_initcall(lguest_devices_init); |
458 | 500 | ||
459 | /*D:150 At this point in the journey we used to now wade through the lguest | 501 | /*D:150 |
502 | * At this point in the journey we used to now wade through the lguest | ||
460 | * devices themselves: net, block and console. Since they're all now virtio | 503 | * devices themselves: net, block and console. Since they're all now virtio |
461 | * devices rather than lguest-specific, I've decided to ignore them. Mostly, | 504 | * devices rather than lguest-specific, I've decided to ignore them. Mostly, |
462 | * they're kind of boring. But this does mean you'll never experience the | 505 | * they're kind of boring. But this does mean you'll never experience the |
463 | * thrill of reading the forbidden love scene buried deep in the block driver. | 506 | * thrill of reading the forbidden love scene buried deep in the block driver. |
464 | * | 507 | * |
465 | * "make Launcher" beckons, where we answer questions like "Where do Guests | 508 | * "make Launcher" beckons, where we answer questions like "Where do Guests |
466 | * come from?", and "What do you do when someone asks for optimization?". */ | 509 | * come from?", and "What do you do when someone asks for optimization?". |
510 | */ | ||
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 9f9a2953b383..b4d3f7ca554f 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c | |||
@@ -1,8 +1,9 @@ | |||
1 | /*P:200 This contains all the /dev/lguest code, whereby the userspace launcher | 1 | /*P:200 This contains all the /dev/lguest code, whereby the userspace launcher |
2 | * controls and communicates with the Guest. For example, the first write will | 2 | * controls and communicates with the Guest. For example, the first write will |
3 | * tell us the Guest's memory layout, pagetable, entry point and kernel address | 3 | * tell us the Guest's memory layout and entry point. A read will run the |
4 | * offset. A read will run the Guest until something happens, such as a signal | 4 | * Guest until something happens, such as a signal or the Guest doing a NOTIFY |
5 | * or the Guest doing a NOTIFY out to the Launcher. :*/ | 5 | * out to the Launcher. |
6 | :*/ | ||
6 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
7 | #include <linux/miscdevice.h> | 8 | #include <linux/miscdevice.h> |
8 | #include <linux/fs.h> | 9 | #include <linux/fs.h> |
@@ -11,14 +12,41 @@ | |||
11 | #include <linux/file.h> | 12 | #include <linux/file.h> |
12 | #include "lg.h" | 13 | #include "lg.h" |
13 | 14 | ||
15 | /*L:056 | ||
16 | * Before we move on, let's jump ahead and look at what the kernel does when | ||
17 | * it needs to look up the eventfds. That will complete our picture of how we | ||
18 | * use RCU. | ||
19 | * | ||
20 | * The notification value is in cpu->pending_notify: we return true if it went | ||
21 | * to an eventfd. | ||
22 | */ | ||
14 | bool send_notify_to_eventfd(struct lg_cpu *cpu) | 23 | bool send_notify_to_eventfd(struct lg_cpu *cpu) |
15 | { | 24 | { |
16 | unsigned int i; | 25 | unsigned int i; |
17 | struct lg_eventfd_map *map; | 26 | struct lg_eventfd_map *map; |
18 | 27 | ||
19 | /* lg->eventfds is RCU-protected */ | 28 | /* |
29 | * This "rcu_read_lock()" helps track when someone is still looking at | ||
30 | * the (RCU-using) eventfds array. It's not actually a lock at all; | ||
31 | * indeed it's a noop in many configurations. (You didn't expect me to | ||
32 | * explain all the RCU secrets here, did you?) | ||
33 | */ | ||
20 | rcu_read_lock(); | 34 | rcu_read_lock(); |
35 | /* | ||
36 | * rcu_dereference is the counter-side of rcu_assign_pointer(); it | ||
37 | * makes sure we don't access the memory pointed to by | ||
38 | * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy, | ||
39 | * but Alpha allows this! Paul McKenney points out that a really | ||
40 | * aggressive compiler could have the same effect: | ||
41 | * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html | ||
42 | * | ||
43 | * So play safe, use rcu_dereference to get the rcu-protected pointer: | ||
44 | */ | ||
21 | map = rcu_dereference(cpu->lg->eventfds); | 45 | map = rcu_dereference(cpu->lg->eventfds); |
46 | /* | ||
47 | * Simple array search: even if they add an eventfd while we do this, | ||
48 | * we'll continue to use the old array and just won't see the new one. | ||
49 | */ | ||
22 | for (i = 0; i < map->num; i++) { | 50 | for (i = 0; i < map->num; i++) { |
23 | if (map->map[i].addr == cpu->pending_notify) { | 51 | if (map->map[i].addr == cpu->pending_notify) { |
24 | eventfd_signal(map->map[i].event, 1); | 52 | eventfd_signal(map->map[i].event, 1); |
@@ -26,19 +54,50 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu) | |||
26 | break; | 54 | break; |
27 | } | 55 | } |
28 | } | 56 | } |
57 | /* We're done with the rcu-protected variable cpu->lg->eventfds. */ | ||
29 | rcu_read_unlock(); | 58 | rcu_read_unlock(); |
59 | |||
60 | /* If we cleared the notification, it's because we found a match. */ | ||
30 | return cpu->pending_notify == 0; | 61 | return cpu->pending_notify == 0; |
31 | } | 62 | } |
32 | 63 | ||
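Stripped of the eventfd details, the read side of this pattern is tiny. Here is a minimal, self-contained sketch against the real RCU API; "demo_map", "demo" and "demo_find" are hypothetical stand-ins for the lg_eventfd_map machinery above, not lguest code:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct demo_map {
	unsigned int num;
	unsigned long val[];		/* flexible array, like map[] above */
};

static struct demo_map *demo;		/* RCU-protected, like lg->eventfds */

static bool demo_find(unsigned long v)
{
	unsigned int i;
	struct demo_map *map;
	bool found = false;

	rcu_read_lock();		/* enter the read-side critical section */
	map = rcu_dereference(demo);	/* fetch the pointer, barriers included */
	for (i = 0; i < map->num; i++) {
		if (map->val[i] == v) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();		/* done looking at "map" */
	return found;
}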
64 | /*L:055 | ||
65 | * One of the more tricksy tricks in the Linux Kernel is a technique called | ||
66 | * Read Copy Update. Since one point of lguest is to teach lguest journeyers | ||
67 | * about kernel coding, I use it here. (In case you're curious, other purposes | ||
68 | * include learning about virtualization and instilling a deep appreciation for | ||
69 | * simplicity and puppies). | ||
70 | * | ||
71 | * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we | ||
72 | * add new eventfds without ever blocking readers from accessing the array. | ||
73 | * The current Launcher only does this during boot, so that never happens. But | ||
74 | * Read Copy Update is cool, and adding a lock risks damaging even more puppies | ||
75 | * than this code does. | ||
76 | * | ||
77 | * We allocate a brand new one-larger array, copy the old one and add our new | ||
78 | * element. Then we make the lg eventfd pointer point to the new array. | ||
79 | * That's the easy part: now we need to free the old one, but we need to make | ||
80 | * sure no slow CPU somewhere is still looking at it. That's what | ||
81 | * synchronize_rcu does for us: it waits until every CPU has indicated that it | ||
82 | * has moved on, so we know it's no longer using the old one. | ||
83 | * | ||
84 | * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update. | ||
85 | */ | ||
33 | static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) | 86 | static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) |
34 | { | 87 | { |
35 | struct lg_eventfd_map *new, *old = lg->eventfds; | 88 | struct lg_eventfd_map *new, *old = lg->eventfds; |
36 | 89 | ||
90 | /* | ||
91 | * We don't allow notifications on value 0 anyway (pending_notify of | ||
92 | * 0 means "nothing pending"). | ||
93 | */ | ||
37 | if (!addr) | 94 | if (!addr) |
38 | return -EINVAL; | 95 | return -EINVAL; |
39 | 96 | ||
40 | /* Replace the old array with the new one, carefully: others can | 97 | /* |
41 | * be accessing it at the same time */ | 98 | * Replace the old array with the new one, carefully: others can |
99 | * be accessing it at the same time. | ||
100 | */ | ||
42 | new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), | 101 | new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), |
43 | GFP_KERNEL); | 102 | GFP_KERNEL); |
44 | if (!new) | 103 | if (!new) |
@@ -52,22 +111,41 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) | |||
52 | new->map[new->num].addr = addr; | 111 | new->map[new->num].addr = addr; |
53 | new->map[new->num].event = eventfd_ctx_fdget(fd); | 112 | new->map[new->num].event = eventfd_ctx_fdget(fd); |
54 | if (IS_ERR(new->map[new->num].event)) { | 113 | if (IS_ERR(new->map[new->num].event)) { |
114 | int err = PTR_ERR(new->map[new->num].event); | ||
55 | kfree(new); | 115 | kfree(new); |
56 | return PTR_ERR(new->map[new->num].event); | 116 | return err; |
57 | } | 117 | } |
58 | new->num++; | 118 | new->num++; |
59 | 119 | ||
60 | /* Now put new one in place. */ | 120 | /* |
121 | * Now put new one in place: rcu_assign_pointer() is a fancy way of | ||
122 | * doing "lg->eventfds = new", but it uses memory barriers to make | ||
123 | * absolutely sure that the contents of "new" written above is nailed | ||
124 | * down before we actually do the assignment. | ||
125 | * | ||
126 | * We have to think about these kinds of things when we're operating on | ||
127 | * live data without locks. | ||
128 | */ | ||
61 | rcu_assign_pointer(lg->eventfds, new); | 129 | rcu_assign_pointer(lg->eventfds, new); |
62 | 130 | ||
63 | /* We're not in a big hurry. Wait until no one's looking at the old | 131 | /* |
64 | * version, then delete it. */ | 132 | * We're not in a big hurry. Wait until no one's looking at the old |
133 | * version, then free it. | ||
134 | */ | ||
65 | synchronize_rcu(); | 135 | synchronize_rcu(); |
66 | kfree(old); | 136 | kfree(old); |
67 | 137 | ||
68 | return 0; | 138 | return 0; |
69 | } | 139 | } |
70 | 140 | ||
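The update side of the same hedged sketch mirrors add_eventfd() directly: copy into a one-larger array, publish it with rcu_assign_pointer(), then wait out the old readers before freeing (same hypothetical "demo" names as the reader sketch earlier):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_add(unsigned long v)
{
	struct demo_map *new, *old = demo;

	/* Copy: allocate a one-larger array and fill it from the old one. */
	new = kmalloc(sizeof(*new) + sizeof(new->val[0]) * (old->num + 1),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	memcpy(new->val, old->val, sizeof(old->val[0]) * old->num);
	new->val[old->num] = v;
	new->num = old->num + 1;

	/* Update: publish the new array; the barriers nail "new" down first. */
	rcu_assign_pointer(demo, new);

	/* Wait until every reader has let go, then free the old array. */
	synchronize_rcu();
	kfree(old);
	return 0;
}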
141 | /*L:052 | ||
142 | * Receiving notifications from the Guest is usually done by attaching a | ||
143 | * particular LHCALL_NOTIFY value to an event file descriptor. The eventfd will | ||
144 | * become readable when the Guest does an LHCALL_NOTIFY with that value. | ||
145 | * | ||
146 | * This is really convenient for processing each virtqueue in a separate | ||
147 | * thread. | ||
148 | */ | ||
71 | static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) | 149 | static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) |
72 | { | 150 | { |
73 | unsigned long addr, fd; | 151 | unsigned long addr, fd; |
@@ -79,15 +157,22 @@ static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) | |||
79 | if (get_user(fd, input) != 0) | 157 | if (get_user(fd, input) != 0) |
80 | return -EFAULT; | 158 | return -EFAULT; |
81 | 159 | ||
160 | /* | ||
161 | * Just make sure two callers don't add eventfds at once. We really | ||
162 | * only need to lock against callers adding to the same Guest, so using | ||
163 | * the Big Lguest Lock is overkill. But this is setup, not a fast path. | ||
164 | */ | ||
82 | mutex_lock(&lguest_lock); | 165 | mutex_lock(&lguest_lock); |
83 | err = add_eventfd(lg, addr, fd); | 166 | err = add_eventfd(lg, addr, fd); |
84 | mutex_unlock(&lguest_lock); | 167 | mutex_unlock(&lguest_lock); |
85 | 168 | ||
86 | return 0; | 169 | return err; |
87 | } | 170 | } |
88 | 171 | ||
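From the Launcher's side the same operation is one eventfd(2) call and one write. A hedged userspace sketch, assuming the LHREQ_EVENTFD request code from linux/lguest_launcher.h and an already-open /dev/lguest descriptor:

#include <sys/eventfd.h>
#include <unistd.h>
#include <linux/lguest_launcher.h>

/* Create an eventfd and ask the Host to signal it on NOTIFYs to "addr". */
static int attach_notify_fd(int lguest_fd, unsigned long addr)
{
	unsigned long args[3];
	int fd = eventfd(0, 0);

	if (fd < 0)
		return -1;
	args[0] = LHREQ_EVENTFD;	/* assumed request code */
	args[1] = addr;
	args[2] = fd;
	if (write(lguest_fd, args, sizeof(args)) != sizeof(args))
		return -1;
	return fd;	/* becomes readable when the Guest notifies "addr" */
}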
89 | /*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt | 172 | /*L:050 |
90 | * number to /dev/lguest. */ | 173 | * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt |
174 | * number to /dev/lguest. | ||
175 | */ | ||
91 | static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) | 176 | static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) |
92 | { | 177 | { |
93 | unsigned long irq; | 178 | unsigned long irq; |
@@ -97,12 +182,18 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) | |||
97 | if (irq >= LGUEST_IRQS) | 182 | if (irq >= LGUEST_IRQS) |
98 | return -EINVAL; | 183 | return -EINVAL; |
99 | 184 | ||
185 | /* | ||
186 | * Next time the Guest runs, the core code will see if it can deliver | ||
187 | * this interrupt. | ||
188 | */ | ||
100 | set_interrupt(cpu, irq); | 189 | set_interrupt(cpu, irq); |
101 | return 0; | 190 | return 0; |
102 | } | 191 | } |
103 | 192 | ||
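The matching Launcher half really is a one-line write; a hedged sketch using LHREQ_IRQ from linux/lguest_launcher.h (same headers as the eventfd sketch above):

/* Ask the Host to deliver interrupt "irq" next time the Guest runs. */
static int send_guest_irq(int lguest_fd, unsigned long irq)
{
	unsigned long args[2] = { LHREQ_IRQ, irq };

	return write(lguest_fd, args, sizeof(args)) == sizeof(args) ? 0 : -1;
}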
104 | /*L:040 Once our Guest is initialized, the Launcher makes it run by reading | 193 | /*L:040 |
105 | * from /dev/lguest. */ | 194 | * Once our Guest is initialized, the Launcher makes it run by reading |
195 | * from /dev/lguest. | ||
196 | */ | ||
106 | static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | 197 | static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) |
107 | { | 198 | { |
108 | struct lguest *lg = file->private_data; | 199 | struct lguest *lg = file->private_data; |
@@ -138,8 +229,10 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | |||
138 | return len; | 229 | return len; |
139 | } | 230 | } |
140 | 231 | ||
141 | /* If we returned from read() last time because the Guest sent I/O, | 232 | /* |
142 | * clear the flag. */ | 233 | * If we returned from read() last time because the Guest sent I/O, |
234 | * clear the flag. | ||
235 | */ | ||
143 | if (cpu->pending_notify) | 236 | if (cpu->pending_notify) |
144 | cpu->pending_notify = 0; | 237 | cpu->pending_notify = 0; |
145 | 238 | ||
@@ -147,8 +240,10 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | |||
147 | return run_guest(cpu, (unsigned long __user *)user); | 240 | return run_guest(cpu, (unsigned long __user *)user); |
148 | } | 241 | } |
149 | 242 | ||
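Seen from the Launcher, running the Guest is therefore one blocking read in a loop: each successful read means the Guest did a NOTIFY that no eventfd claimed. A hedged sketch, assuming the notify address comes back as a single unsigned long as the code above suggests:

/* Run the Guest until it dies; handle each NOTIFY as it arrives. */
static void run_guest_loop(int lguest_fd)
{
	unsigned long notify_addr;

	for (;;) {
		ssize_t r = read(lguest_fd, &notify_addr, sizeof(notify_addr));

		if (r != sizeof(notify_addr))
			break;	/* error, or the Guest died */
		/* A real Launcher would dispatch on notify_addr here. */
	}
}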
150 | /*L:025 This actually initializes a CPU. For the moment, a Guest is only | 243 | /*L:025 |
151 | * uniprocessor, so "id" is always 0. */ | 244 | * This actually initializes a CPU. For the moment, a Guest is only |
245 | * uniprocessor, so "id" is always 0. | ||
246 | */ | ||
152 | static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | 247 | static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) |
153 | { | 248 | { |
154 | /* We have a limited number of CPUs in the lguest struct. */ | 249 | /* We have a limited number of CPUs in the lguest struct. */ |
@@ -163,8 +258,10 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | |||
163 | /* Each CPU has a timer it can set. */ | 258 | /* Each CPU has a timer it can set. */ |
164 | init_clockdev(cpu); | 259 | init_clockdev(cpu); |
165 | 260 | ||
166 | /* We need a complete page for the Guest registers: they are accessible | 261 | /* |
167 | * to the Guest and we can only grant it access to whole pages. */ | 262 | * We need a complete page for the Guest registers: they are accessible |
263 | * to the Guest and we can only grant it access to whole pages. | ||
264 | */ | ||
168 | cpu->regs_page = get_zeroed_page(GFP_KERNEL); | 265 | cpu->regs_page = get_zeroed_page(GFP_KERNEL); |
169 | if (!cpu->regs_page) | 266 | if (!cpu->regs_page) |
170 | return -ENOMEM; | 267 | return -ENOMEM; |
@@ -172,29 +269,38 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | |||
172 | /* We actually put the registers at the bottom of the page. */ | 269 | /* We actually put the registers at the bottom of the page. */ |
173 | cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs); | 270 | cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs); |
174 | 271 | ||
175 | /* Now we initialize the Guest's registers, handing it the start | 272 | /* |
176 | * address. */ | 273 | * Now we initialize the Guest's registers, handing it the start |
274 | * address. | ||
275 | */ | ||
177 | lguest_arch_setup_regs(cpu, start_ip); | 276 | lguest_arch_setup_regs(cpu, start_ip); |
178 | 277 | ||
179 | /* We keep a pointer to the Launcher task (ie. current task) for when | 278 | /* |
180 | * other Guests want to wake this one (eg. console input). */ | 279 | * We keep a pointer to the Launcher task (ie. current task) for when |
280 | * other Guests want to wake this one (eg. console input). | ||
281 | */ | ||
181 | cpu->tsk = current; | 282 | cpu->tsk = current; |
182 | 283 | ||
183 | /* We need to keep a pointer to the Launcher's memory map, because if | 284 | /* |
285 | * We need to keep a pointer to the Launcher's memory map, because if | ||
184 | * the Launcher dies we need to clean it up. If we don't keep a | 286 | * the Launcher dies we need to clean it up. If we don't keep a |
185 | * reference, it is destroyed before close() is called. */ | 287 | * reference, it is destroyed before close() is called. |
288 | */ | ||
186 | cpu->mm = get_task_mm(cpu->tsk); | 289 | cpu->mm = get_task_mm(cpu->tsk); |
187 | 290 | ||
188 | /* We remember which CPU's pages this Guest used last, for optimization | 291 | /* |
189 | * when the same Guest runs on the same CPU twice. */ | 292 | * We remember which CPU's pages this Guest used last, for optimization |
293 | * when the same Guest runs on the same CPU twice. | ||
294 | */ | ||
190 | cpu->last_pages = NULL; | 295 | cpu->last_pages = NULL; |
191 | 296 | ||
192 | /* No error == success. */ | 297 | /* No error == success. */ |
193 | return 0; | 298 | return 0; |
194 | } | 299 | } |
195 | 300 | ||
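The register-page arithmetic deserves a second look: the registers sit flush against the end of the zeroed page, so granting the Guest that whole page exposes nothing else. A freestanding sketch of the same computation ("demo_regs" is a hypothetical stand-in for struct lguest_regs):

#include <stdio.h>

#define PAGE_SIZE 4096
struct demo_regs { unsigned long gprs[20]; };

int main(void)
{
	static char page[PAGE_SIZE];	/* stand-in for get_zeroed_page() */
	struct demo_regs *regs =
		(struct demo_regs *)(page + PAGE_SIZE - sizeof(*regs));

	printf("regs occupy the final %zu bytes of the %d-byte page\n",
	       sizeof(*regs), PAGE_SIZE);
	return (char *)regs >= page ? 0 : 1;
}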
196 | /*L:020 The initialization write supplies 3 pointer sized (32 or 64 bit) | 301 | /*L:020 |
197 | * values (in addition to the LHREQ_INITIALIZE value). These are: | 302 | * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in |
303 | * addition to the LHREQ_INITIALIZE value). These are: | ||
198 | * | 304 | * |
199 | * base: The start of the Guest-physical memory inside the Launcher memory. | 305 | * base: The start of the Guest-physical memory inside the Launcher memory. |
200 | * | 306 | * |
@@ -206,14 +312,15 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | |||
206 | */ | 312 | */ |
207 | static int initialize(struct file *file, const unsigned long __user *input) | 313 | static int initialize(struct file *file, const unsigned long __user *input) |
208 | { | 314 | { |
209 | /* "struct lguest" contains everything we (the Host) know about a | 315 | /* "struct lguest" contains all we (the Host) know about a Guest. */ |
210 | * Guest. */ | ||
211 | struct lguest *lg; | 316 | struct lguest *lg; |
212 | int err; | 317 | int err; |
213 | unsigned long args[3]; | 318 | unsigned long args[3]; |
214 | 319 | ||
215 | /* We grab the Big Lguest lock, which protects against multiple | 320 | /* |
216 | * simultaneous initializations. */ | 321 | * We grab the Big Lguest lock, which protects against multiple |
322 | * simultaneous initializations. | ||
323 | */ | ||
217 | mutex_lock(&lguest_lock); | 324 | mutex_lock(&lguest_lock); |
218 | /* You can't initialize twice! Close the device and start again... */ | 325 | /* You can't initialize twice! Close the device and start again... */ |
219 | if (file->private_data) { | 326 | if (file->private_data) { |
@@ -248,8 +355,10 @@ static int initialize(struct file *file, const unsigned long __user *input) | |||
248 | if (err) | 355 | if (err) |
249 | goto free_eventfds; | 356 | goto free_eventfds; |
250 | 357 | ||
251 | /* Initialize the Guest's shadow page tables, using the toplevel | 358 | /* |
252 | * address the Launcher gave us. This allocates memory, so can fail. */ | 359 | * Initialize the Guest's shadow page tables, using the toplevel |
360 | * address the Launcher gave us. This allocates memory, so can fail. | ||
361 | */ | ||
253 | err = init_guest_pagetable(lg); | 362 | err = init_guest_pagetable(lg); |
254 | if (err) | 363 | if (err) |
255 | goto free_regs; | 364 | goto free_regs; |
@@ -274,20 +383,24 @@ unlock: | |||
274 | return err; | 383 | return err; |
275 | } | 384 | } |
276 | 385 | ||
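In Launcher terms, everything initialize() does is triggered by a single write. A hedged sketch, assuming LHREQ_INITIALIZE from linux/lguest_launcher.h and the argument order the L:020 comment above describes (memory base, then page limit, then start address; "pfn_limit" and "init_guest" are names chosen here, not lguest's):

/* The first write: hand over the Guest's memory layout and entry point. */
static int init_guest(int lguest_fd, void *base,
		      unsigned long pfn_limit, unsigned long start_ip)
{
	unsigned long args[4] = { LHREQ_INITIALIZE,
				  (unsigned long)base, pfn_limit, start_ip };

	return write(lguest_fd, args, sizeof(args)) == sizeof(args) ? 0 : -1;
}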
277 | /*L:010 The first operation the Launcher does must be a write. All writes | 386 | /*L:010 |
387 | * The first operation the Launcher does must be a write. All writes | ||
278 | * start with an unsigned long number: for the first write this must be | 388 | * start with an unsigned long number: for the first write this must be |
279 | * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use | 389 | * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use |
280 | * writes of other values to send interrupts. | 390 | * writes of other values to send interrupts or set up receipt of notifications. |
281 | * | 391 | * |
282 | * Note that we overload the "offset" in the /dev/lguest file to indicate what | 392 | * Note that we overload the "offset" in the /dev/lguest file to indicate what |
283 | * CPU number we're dealing with. Currently this is always 0, since we only | 393 | * CPU number we're dealing with. Currently this is always 0 since we only |
284 | * support uniprocessor Guests, but you can see the beginnings of SMP support | 394 | * support uniprocessor Guests, but you can see the beginnings of SMP support |
285 | * here. */ | 395 | * here. |
396 | */ | ||
286 | static ssize_t write(struct file *file, const char __user *in, | 397 | static ssize_t write(struct file *file, const char __user *in, |
287 | size_t size, loff_t *off) | 398 | size_t size, loff_t *off) |
288 | { | 399 | { |
289 | /* Once the Guest is initialized, we hold the "struct lguest" in the | 400 | /* |
290 | * file private data. */ | 401 | * Once the Guest is initialized, we hold the "struct lguest" in the |
402 | * file private data. | ||
403 | */ | ||
291 | struct lguest *lg = file->private_data; | 404 | struct lguest *lg = file->private_data; |
292 | const unsigned long __user *input = (const unsigned long __user *)in; | 405 | const unsigned long __user *input = (const unsigned long __user *)in; |
293 | unsigned long req; | 406 | unsigned long req; |
@@ -322,13 +435,15 @@ static ssize_t write(struct file *file, const char __user *in, | |||
322 | } | 435 | } |
323 | } | 436 | } |
324 | 437 | ||
325 | /*L:060 The final piece of interface code is the close() routine. It reverses | 438 | /*L:060 |
439 | * The final piece of interface code is the close() routine. It reverses | ||
326 | * everything done in initialize(). This is usually called because the | 440 | * everything done in initialize(). This is usually called because the |
327 | * Launcher exited. | 441 | * Launcher exited. |
328 | * | 442 | * |
329 | * Note that the close routine returns 0 or a negative error number: it can't | 443 | * Note that the close routine returns 0 or a negative error number: it can't |
330 | * really fail, but it can whine. I blame Sun for this wart, and K&R C for | 444 | * really fail, but it can whine. I blame Sun for this wart, and K&R C for |
331 | * letting them do it. :*/ | 445 | * letting them do it. |
446 | :*/ | ||
332 | static int close(struct inode *inode, struct file *file) | 447 | static int close(struct inode *inode, struct file *file) |
333 | { | 448 | { |
334 | struct lguest *lg = file->private_data; | 449 | struct lguest *lg = file->private_data; |
@@ -338,8 +453,10 @@ static int close(struct inode *inode, struct file *file) | |||
338 | if (!lg) | 453 | if (!lg) |
339 | return 0; | 454 | return 0; |
340 | 455 | ||
341 | /* We need the big lock, to protect from inter-guest I/O and other | 456 | /* |
342 | * Launchers initializing guests. */ | 457 | * We need the big lock, to protect from inter-guest I/O and other |
458 | * Launchers initializing guests. | ||
459 | */ | ||
343 | mutex_lock(&lguest_lock); | 460 | mutex_lock(&lguest_lock); |
344 | 461 | ||
345 | /* Free up the shadow page tables for the Guest. */ | 462 | /* Free up the shadow page tables for the Guest. */ |
@@ -350,8 +467,10 @@ static int close(struct inode *inode, struct file *file) | |||
350 | hrtimer_cancel(&lg->cpus[i].hrt); | 467 | hrtimer_cancel(&lg->cpus[i].hrt); |
351 | /* We can free up the register page we allocated. */ | 468 | /* We can free up the register page we allocated. */ |
352 | free_page(lg->cpus[i].regs_page); | 469 | free_page(lg->cpus[i].regs_page); |
353 | /* Now all the memory cleanups are done, it's safe to release | 470 | /* |
354 | * the Launcher's memory management structure. */ | 471 | * Now all the memory cleanups are done, it's safe to release |
472 | * the Launcher's memory management structure. | ||
473 | */ | ||
355 | mmput(lg->cpus[i].mm); | 474 | mmput(lg->cpus[i].mm); |
356 | } | 475 | } |
357 | 476 | ||
@@ -360,8 +479,10 @@ static int close(struct inode *inode, struct file *file) | |||
360 | eventfd_ctx_put(lg->eventfds->map[i].event); | 479 | eventfd_ctx_put(lg->eventfds->map[i].event); |
361 | kfree(lg->eventfds); | 480 | kfree(lg->eventfds); |
362 | 481 | ||
363 | /* If lg->dead doesn't contain an error code it will be NULL or a | 482 | /* |
364 | * kmalloc()ed string, either of which is ok to hand to kfree(). */ | 483 | * If lg->dead doesn't contain an error code it will be NULL or a |
484 | * kmalloc()ed string, either of which is ok to hand to kfree(). | ||
485 | */ | ||
365 | if (!IS_ERR(lg->dead)) | 486 | if (!IS_ERR(lg->dead)) |
366 | kfree(lg->dead); | 487 | kfree(lg->dead); |
367 | /* Free the memory allocated to the lguest_struct */ | 488 | /* Free the memory allocated to the lguest_struct */ |
@@ -385,7 +506,8 @@ static int close(struct inode *inode, struct file *file) | |||
385 | * | 506 | * |
386 | * We begin our understanding with the Host kernel interface which the Launcher | 507 | * We begin our understanding with the Host kernel interface which the Launcher |
387 | * uses: reading and writing a character device called /dev/lguest. All the | 508 | * uses: reading and writing a character device called /dev/lguest. All the |
388 | * work happens in the read(), write() and close() routines: */ | 509 | * work happens in the read(), write() and close() routines: |
510 | */ | ||
389 | static struct file_operations lguest_fops = { | 511 | static struct file_operations lguest_fops = { |
390 | .owner = THIS_MODULE, | 512 | .owner = THIS_MODULE, |
391 | .release = close, | 513 | .release = close, |
@@ -393,8 +515,10 @@ static struct file_operations lguest_fops = { | |||
393 | .read = read, | 515 | .read = read, |
394 | }; | 516 | }; |
395 | 517 | ||
396 | /* This is a textbook example of a "misc" character device. Populate a "struct | 518 | /* |
397 | * miscdevice" and register it with misc_register(). */ | 519 | * This is a textbook example of a "misc" character device. Populate a "struct |
520 | * miscdevice" and register it with misc_register(). | ||
521 | */ | ||
398 | static struct miscdevice lguest_dev = { | 522 | static struct miscdevice lguest_dev = { |
399 | .minor = MISC_DYNAMIC_MINOR, | 523 | .minor = MISC_DYNAMIC_MINOR, |
400 | .name = "lguest", | 524 | .name = "lguest", |
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index a6fe1abda240..a8d0aee3bc0e 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c | |||
@@ -1,9 +1,11 @@ | |||
1 | /*P:700 The pagetable code, on the other hand, still shows the scars of | 1 | /*P:700 |
2 | * The pagetable code, on the other hand, still shows the scars of | ||
2 | * previous encounters. It's functional, and as neat as it can be in the | 3 | * previous encounters. It's functional, and as neat as it can be in the |
3 | * circumstances, but be wary, for these things are subtle and break easily. | 4 | * circumstances, but be wary, for these things are subtle and break easily. |
4 | * The Guest provides a virtual to physical mapping, but we can neither trust | 5 | * The Guest provides a virtual to physical mapping, but we can neither trust |
5 | * it nor use it: we verify and convert it here then point the CPU to the | 6 | * it nor use it: we verify and convert it here then point the CPU to the |
6 | * converted Guest pages when running the Guest. :*/ | 7 | * converted Guest pages when running the Guest. |
8 | :*/ | ||
7 | 9 | ||
8 | /* Copyright (C) Rusty Russell IBM Corporation 2006. | 10 | /* Copyright (C) Rusty Russell IBM Corporation 2006. |
9 | * GPL v2 and any later version */ | 11 | * GPL v2 and any later version */ |
@@ -17,18 +19,20 @@ | |||
17 | #include <asm/bootparam.h> | 19 | #include <asm/bootparam.h> |
18 | #include "lg.h" | 20 | #include "lg.h" |
19 | 21 | ||
20 | /*M:008 We hold reference to pages, which prevents them from being swapped. | 22 | /*M:008 |
23 | * We hold reference to pages, which prevents them from being swapped. | ||
21 | * It'd be nice to have a callback in the "struct mm_struct" when Linux wants | 24 | * It'd be nice to have a callback in the "struct mm_struct" when Linux wants |
22 | * to swap out. If we had this, and a shrinker callback to trim PTE pages, we | 25 | * to swap out. If we had this, and a shrinker callback to trim PTE pages, we |
23 | * could probably consider launching Guests as non-root. :*/ | 26 | * could probably consider launching Guests as non-root. |
27 | :*/ | ||
24 | 28 | ||
25 | /*H:300 | 29 | /*H:300 |
26 | * The Page Table Code | 30 | * The Page Table Code |
27 | * | 31 | * |
28 | * We use two-level page tables for the Guest. If you're not entirely | 32 | * We use two-level page tables for the Guest, or three-level with PAE. If |
29 | * comfortable with virtual addresses, physical addresses and page tables then | 33 | * you're not entirely comfortable with virtual addresses, physical addresses |
30 | * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with | 34 | * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page |
31 | * diagrams!). | 35 | * Table Handling" (with diagrams!). |
32 | * | 36 | * |
33 | * The Guest keeps page tables, but we maintain the actual ones here: these are | 37 | * The Guest keeps page tables, but we maintain the actual ones here: these are |
34 | * called "shadow" page tables. Which is a very Guest-centric name: these are | 38 | * called "shadow" page tables. Which is a very Guest-centric name: these are |
@@ -45,16 +49,18 @@ | |||
45 | * (v) Flushing (throwing away) page tables, | 49 | * (v) Flushing (throwing away) page tables, |
46 | * (vi) Mapping the Switcher when the Guest is about to run, | 50 | * (vi) Mapping the Switcher when the Guest is about to run, |
47 | * (vii) Setting up the page tables initially. | 51 | * (vii) Setting up the page tables initially. |
48 | :*/ | 52 | :*/ |
49 | 53 | ||
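Before wading in, it may help to see the address split the walk relies on. A freestanding sketch for the non-PAE case (10 bits of PGD index, 10 bits of PTE index, 12 bits of page offset; PAE splits 2/9/9/12 instead):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22
#define PTRS_PER_PTE	1024

int main(void)
{
	unsigned long vaddr = 0xC0123456UL;	/* arbitrary example address */
	unsigned long pgd_idx = vaddr >> PGDIR_SHIFT;
	unsigned long pte_idx = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	unsigned long offset = vaddr & ((1UL << PAGE_SHIFT) - 1);

	/* Prints: pgd=768 pte=291 offset=0x456 */
	printf("pgd=%lu pte=%lu offset=%#lx\n", pgd_idx, pte_idx, offset);
	return 0;
}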
50 | 54 | /* | |
51 | /* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is | 55 | * The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB) |
52 | * conveniently placed at the top 4MB, so it uses a separate, complete PTE | 56 | * or 512 PTE entries with PAE (2MB). |
53 | * page. */ | 57 | */ |
54 | #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) | 58 | #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) |
55 | 59 | ||
56 | /* For PAE we need the PMD index as well. We use the last 2MB, so we | 60 | /* |
57 | * will need the last pmd entry of the last pmd page. */ | 61 | * For PAE we need the PMD index as well. We use the last 2MB, so we |
62 | * will need the last pmd entry of the last pmd page. | ||
63 | */ | ||
58 | #ifdef CONFIG_X86_PAE | 64 | #ifdef CONFIG_X86_PAE |
59 | #define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) | 65 | #define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) |
60 | #define RESERVE_MEM 2U | 66 | #define RESERVE_MEM 2U |
@@ -64,14 +70,18 @@ | |||
64 | #define CHECK_GPGD_MASK _PAGE_TABLE | 70 | #define CHECK_GPGD_MASK _PAGE_TABLE |
65 | #endif | 71 | #endif |
66 | 72 | ||
67 | /* We actually need a separate PTE page for each CPU. Remember that after the | 73 | /* |
74 | * We actually need a separate PTE page for each CPU. Remember that after the | ||
68 | * Switcher code itself comes two pages for each CPU, and we don't want this | 75 | * Switcher code itself comes two pages for each CPU, and we don't want this |
69 | * CPU's guest to see the pages of any other CPU. */ | 76 | * CPU's guest to see the pages of any other CPU. |
77 | */ | ||
70 | static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); | 78 | static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); |
71 | #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) | 79 | #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) |
72 | 80 | ||
73 | /*H:320 The page table code is curly enough to need helper functions to keep it | 81 | /*H:320 |
74 | * clear and clean. | 82 | * The page table code is curly enough to need helper functions to keep it |
83 | * clear and clean. The kernel itself provides many of them; that's one | ||
84 | * advantage of insisting that the Guest and Host use the same CONFIG_PAE setting. | ||
75 | * | 85 | * |
76 | * There are two functions which return pointers to the shadow (aka "real") | 86 | * There are two functions which return pointers to the shadow (aka "real") |
77 | * page tables. | 87 | * page tables. |
@@ -79,7 +89,8 @@ static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); | |||
79 | * spgd_addr() takes the virtual address and returns a pointer to the top-level | 89 | * spgd_addr() takes the virtual address and returns a pointer to the top-level |
80 | * page directory entry (PGD) for that address. Since we keep track of several | 90 | * page directory entry (PGD) for that address. Since we keep track of several |
81 | * page tables, the "i" argument tells us which one we're interested in (it's | 91 | * page tables, the "i" argument tells us which one we're interested in (it's |
82 | * usually the current one). */ | 92 | * usually the current one). |
93 | */ | ||
83 | static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) | 94 | static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) |
84 | { | 95 | { |
85 | unsigned int index = pgd_index(vaddr); | 96 | unsigned int index = pgd_index(vaddr); |
@@ -96,9 +107,11 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) | |||
96 | } | 107 | } |
97 | 108 | ||
98 | #ifdef CONFIG_X86_PAE | 109 | #ifdef CONFIG_X86_PAE |
99 | /* This routine then takes the PGD entry given above, which contains the | 110 | /* |
111 | * This routine then takes the PGD entry given above, which contains the | ||
100 | * address of the PMD page. It then returns a pointer to the PMD entry for the | 112 | * address of the PMD page. It then returns a pointer to the PMD entry for the |
101 | * given address. */ | 113 | * given address. |
114 | */ | ||
102 | static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) | 115 | static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) |
103 | { | 116 | { |
104 | unsigned int index = pmd_index(vaddr); | 117 | unsigned int index = pmd_index(vaddr); |
@@ -119,9 +132,11 @@ static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) | |||
119 | } | 132 | } |
120 | #endif | 133 | #endif |
121 | 134 | ||
122 | /* This routine then takes the page directory entry returned above, which | 135 | /* |
136 | * This routine then takes the page directory entry returned above, which | ||
123 | * contains the address of the page table entry (PTE) page. It then returns a | 137 | * contains the address of the page table entry (PTE) page. It then returns a |
124 | * pointer to the PTE entry for the given address. */ | 138 | * pointer to the PTE entry for the given address. |
139 | */ | ||
125 | static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) | 140 | static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) |
126 | { | 141 | { |
127 | #ifdef CONFIG_X86_PAE | 142 | #ifdef CONFIG_X86_PAE |
@@ -139,8 +154,10 @@ static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) | |||
139 | return &page[pte_index(vaddr)]; | 154 | return &page[pte_index(vaddr)]; |
140 | } | 155 | } |
141 | 156 | ||
142 | /* These two functions just like the above two, except they access the Guest | 157 | /* |
143 | * page tables. Hence they return a Guest address. */ | 158 | * These functions are just like the above two, except they access the Guest |
159 | * page tables. Hence they return a Guest address. | ||
160 | */ | ||
144 | static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) | 161 | static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) |
145 | { | 162 | { |
146 | unsigned int index = vaddr >> (PGDIR_SHIFT); | 163 | unsigned int index = vaddr >> (PGDIR_SHIFT); |
@@ -148,6 +165,7 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) | |||
148 | } | 165 | } |
149 | 166 | ||
150 | #ifdef CONFIG_X86_PAE | 167 | #ifdef CONFIG_X86_PAE |
168 | /* Follow the PGD to the PMD. */ | ||
151 | static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) | 169 | static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) |
152 | { | 170 | { |
153 | unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; | 171 | unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; |
@@ -155,6 +173,7 @@ static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) | |||
155 | return gpage + pmd_index(vaddr) * sizeof(pmd_t); | 173 | return gpage + pmd_index(vaddr) * sizeof(pmd_t); |
156 | } | 174 | } |
157 | 175 | ||
176 | /* Follow the PMD to the PTE. */ | ||
158 | static unsigned long gpte_addr(struct lg_cpu *cpu, | 177 | static unsigned long gpte_addr(struct lg_cpu *cpu, |
159 | pmd_t gpmd, unsigned long vaddr) | 178 | pmd_t gpmd, unsigned long vaddr) |
160 | { | 179 | { |
@@ -164,6 +183,7 @@ static unsigned long gpte_addr(struct lg_cpu *cpu, | |||
164 | return gpage + pte_index(vaddr) * sizeof(pte_t); | 183 | return gpage + pte_index(vaddr) * sizeof(pte_t); |
165 | } | 184 | } |
166 | #else | 185 | #else |
186 | /* Follow the PGD to the PTE (no mid-level for !PAE). */ | ||
167 | static unsigned long gpte_addr(struct lg_cpu *cpu, | 187 | static unsigned long gpte_addr(struct lg_cpu *cpu, |
168 | pgd_t gpgd, unsigned long vaddr) | 188 | pgd_t gpgd, unsigned long vaddr) |
169 | { | 189 | { |
@@ -175,17 +195,21 @@ static unsigned long gpte_addr(struct lg_cpu *cpu, | |||
175 | #endif | 195 | #endif |
176 | /*:*/ | 196 | /*:*/ |
177 | 197 | ||
178 | /*M:014 get_pfn is slow: we could probably try to grab batches of pages here as | 198 | /*M:014 |
179 | * an optimization (ie. pre-faulting). :*/ | 199 | * get_pfn is slow: we could probably try to grab batches of pages here as |
200 | * an optimization (ie. pre-faulting). | ||
201 | :*/ | ||
180 | 202 | ||
181 | /*H:350 This routine takes a page number given by the Guest and converts it to | 203 | /*H:350 |
204 | * This routine takes a page number given by the Guest and converts it to | ||
182 | * an actual, physical page number. It can fail for several reasons: the | 205 | * an actual, physical page number. It can fail for several reasons: the |
183 | * virtual address might not be mapped by the Launcher, the write flag is set | 206 | * virtual address might not be mapped by the Launcher, the write flag is set |
184 | * and the page is read-only, or the write flag was set and the page was | 207 | * and the page is read-only, or the write flag was set and the page was |
185 | * shared so had to be copied, but we ran out of memory. | 208 | * shared so had to be copied, but we ran out of memory. |
186 | * | 209 | * |
187 | * This holds a reference to the page, so release_pte() is careful to put that | 210 | * This holds a reference to the page, so release_pte() is careful to put that |
188 | * back. */ | 211 | * back. |
212 | */ | ||
189 | static unsigned long get_pfn(unsigned long virtpfn, int write) | 213 | static unsigned long get_pfn(unsigned long virtpfn, int write) |
190 | { | 214 | { |
191 | struct page *page; | 215 | struct page *page; |
@@ -198,33 +222,41 @@ static unsigned long get_pfn(unsigned long virtpfn, int write) | |||
198 | return -1UL; | 222 | return -1UL; |
199 | } | 223 | } |
200 | 224 | ||
201 | /*H:340 Converting a Guest page table entry to a shadow (ie. real) page table | 225 | /*H:340 |
226 | * Converting a Guest page table entry to a shadow (ie. real) page table | ||
202 | * entry can be a little tricky. The flags are (almost) the same, but the | 227 | * entry can be a little tricky. The flags are (almost) the same, but the |
203 | * Guest PTE contains a virtual page number: the CPU needs the real page | 228 | * Guest PTE contains a virtual page number: the CPU needs the real page |
204 | * number. */ | 229 | * number. |
230 | */ | ||
205 | static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) | 231 | static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) |
206 | { | 232 | { |
207 | unsigned long pfn, base, flags; | 233 | unsigned long pfn, base, flags; |
208 | 234 | ||
209 | /* The Guest sets the global flag, because it thinks that it is using | 235 | /* |
236 | * The Guest sets the global flag, because it thinks that it is using | ||
210 | * PGE. We only told it to use PGE so it would tell us whether it was | 237 | * PGE. We only told it to use PGE so it would tell us whether it was |
211 | * flushing a kernel mapping or a userspace mapping. We don't actually | 238 | * flushing a kernel mapping or a userspace mapping. We don't actually |
212 | * use the global bit, so throw it away. */ | 239 | * use the global bit, so throw it away. |
240 | */ | ||
213 | flags = (pte_flags(gpte) & ~_PAGE_GLOBAL); | 241 | flags = (pte_flags(gpte) & ~_PAGE_GLOBAL); |
214 | 242 | ||
215 | /* The Guest's pages are offset inside the Launcher. */ | 243 | /* The Guest's pages are offset inside the Launcher. */ |
216 | base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; | 244 | base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; |
217 | 245 | ||
218 | /* We need a temporary "unsigned long" variable to hold the answer from | 246 | /* |
247 | * We need a temporary "unsigned long" variable to hold the answer from | ||
219 | * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't | 248 | * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't |
220 | * fit in spte.pfn. get_pfn() finds the real physical number of the | 249 | * fit in spte.pfn. get_pfn() finds the real physical number of the |
221 | * page, given the virtual number. */ | 250 | * page, given the virtual number. |
251 | */ | ||
222 | pfn = get_pfn(base + pte_pfn(gpte), write); | 252 | pfn = get_pfn(base + pte_pfn(gpte), write); |
223 | if (pfn == -1UL) { | 253 | if (pfn == -1UL) { |
224 | kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); | 254 | kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); |
225 | /* When we destroy the Guest, we'll go through the shadow page | 255 | /* |
256 | * When we destroy the Guest, we'll go through the shadow page | ||
226 | * tables and release_pte() them. Make sure we don't think | 257 | * tables and release_pte() them. Make sure we don't think |
227 | * this one is valid! */ | 258 | * this one is valid! |
259 | */ | ||
228 | flags = 0; | 260 | flags = 0; |
229 | } | 261 | } |
230 | /* Now we assemble our shadow PTE from the page number and flags. */ | 262 | /* Now we assemble our shadow PTE from the page number and flags. */ |
@@ -234,8 +266,10 @@ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) | |||
234 | /*H:460 And to complete the chain, release_pte() looks like this: */ | 266 | /*H:460 And to complete the chain, release_pte() looks like this: */ |
235 | static void release_pte(pte_t pte) | 267 | static void release_pte(pte_t pte) |
236 | { | 268 | { |
237 | /* Remember that get_user_pages_fast() took a reference to the page, in | 269 | /* |
238 | * get_pfn()? We have to put it back now. */ | 270 | * Remember that get_user_pages_fast() took a reference to the page, in |
271 | * get_pfn()? We have to put it back now. | ||
272 | */ | ||
239 | if (pte_flags(pte) & _PAGE_PRESENT) | 273 | if (pte_flags(pte) & _PAGE_PRESENT) |
240 | put_page(pte_page(pte)); | 274 | put_page(pte_page(pte)); |
241 | } | 275 | } |
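The flag juggling in gpte_to_spte() can be modelled in a few lines. A freestanding sketch with the standard x86 bit values ("mk_spte" and "host_pfn" are hypothetical names; the real code also folds the Launcher's mem_base offset into the pfn lookup):

#include <stdio.h>

#define _PAGE_PRESENT	0x001UL
#define _PAGE_RW	0x002UL
#define _PAGE_GLOBAL	0x100UL
#define PAGE_SHIFT	12

/* Marry the Guest's flags (minus the global bit) to the Host's pfn. */
static unsigned long mk_spte(unsigned long gflags, unsigned long host_pfn)
{
	unsigned long flags = gflags & ~_PAGE_GLOBAL;

	return (host_pfn << PAGE_SHIFT) | flags;
}

int main(void)
{
	/* Prints 0x1234003: page 0x1234, present and writable, PGE dropped. */
	printf("%#lx\n",
	       mk_spte(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL, 0x1234));
	return 0;
}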
@@ -273,7 +307,8 @@ static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd) | |||
273 | * and return to the Guest without it knowing. | 307 | * and return to the Guest without it knowing. |
274 | * | 308 | * |
275 | * If we fixed up the fault (ie. we mapped the address), this routine returns | 309 | * If we fixed up the fault (ie. we mapped the address), this routine returns |
276 | * true. Otherwise, it was a real fault and we need to tell the Guest. */ | 310 | * true. Otherwise, it was a real fault and we need to tell the Guest. |
311 | */ | ||
277 | bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | 312 | bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) |
278 | { | 313 | { |
279 | pgd_t gpgd; | 314 | pgd_t gpgd; |
@@ -282,6 +317,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
282 | pte_t gpte; | 317 | pte_t gpte; |
283 | pte_t *spte; | 318 | pte_t *spte; |
284 | 319 | ||
320 | /* Mid level for PAE. */ | ||
285 | #ifdef CONFIG_X86_PAE | 321 | #ifdef CONFIG_X86_PAE |
286 | pmd_t *spmd; | 322 | pmd_t *spmd; |
287 | pmd_t gpmd; | 323 | pmd_t gpmd; |
@@ -298,22 +334,26 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
298 | if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) { | 334 | if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) { |
299 | /* No shadow entry: allocate a new shadow PTE page. */ | 335 | /* No shadow entry: allocate a new shadow PTE page. */ |
300 | unsigned long ptepage = get_zeroed_page(GFP_KERNEL); | 336 | unsigned long ptepage = get_zeroed_page(GFP_KERNEL); |
301 | /* This is not really the Guest's fault, but killing it is | 337 | /* |
302 | * simple for this corner case. */ | 338 | * This is not really the Guest's fault, but killing it is |
339 | * simple for this corner case. | ||
340 | */ | ||
303 | if (!ptepage) { | 341 | if (!ptepage) { |
304 | kill_guest(cpu, "out of memory allocating pte page"); | 342 | kill_guest(cpu, "out of memory allocating pte page"); |
305 | return false; | 343 | return false; |
306 | } | 344 | } |
307 | /* We check that the Guest pgd is OK. */ | 345 | /* We check that the Guest pgd is OK. */ |
308 | check_gpgd(cpu, gpgd); | 346 | check_gpgd(cpu, gpgd); |
309 | /* And we copy the flags to the shadow PGD entry. The page | 347 | /* |
310 | * number in the shadow PGD is the page we just allocated. */ | 348 | * And we copy the flags to the shadow PGD entry. The page |
349 | * number in the shadow PGD is the page we just allocated. | ||
350 | */ | ||
311 | set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); | 351 | set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); |
312 | } | 352 | } |
313 | 353 | ||
314 | #ifdef CONFIG_X86_PAE | 354 | #ifdef CONFIG_X86_PAE |
315 | gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); | 355 | gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); |
316 | /* middle level not present? We can't map it in. */ | 356 | /* Middle level not present? We can't map it in. */ |
317 | if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) | 357 | if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) |
318 | return false; | 358 | return false; |
319 | 359 | ||
@@ -324,8 +364,10 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
324 | /* No shadow entry: allocate a new shadow PTE page. */ | 364 | /* No shadow entry: allocate a new shadow PTE page. */ |
325 | unsigned long ptepage = get_zeroed_page(GFP_KERNEL); | 365 | unsigned long ptepage = get_zeroed_page(GFP_KERNEL); |
326 | 366 | ||
327 | /* This is not really the Guest's fault, but killing it is | 367 | /* |
328 | * simple for this corner case. */ | 368 | * This is not really the Guest's fault, but killing it is |
369 | * simple for this corner case. | ||
370 | */ | ||
329 | if (!ptepage) { | 371 | if (!ptepage) { |
330 | kill_guest(cpu, "out of memory allocating pte page"); | 372 | kill_guest(cpu, "out of memory allocating pte page"); |
331 | return false; | 373 | return false; |
@@ -334,27 +376,37 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
334 | /* We check that the Guest pmd is OK. */ | 376 | /* We check that the Guest pmd is OK. */ |
335 | check_gpmd(cpu, gpmd); | 377 | check_gpmd(cpu, gpmd); |
336 | 378 | ||
337 | /* And we copy the flags to the shadow PMD entry. The page | 379 | /* |
338 | * number in the shadow PMD is the page we just allocated. */ | 380 | * And we copy the flags to the shadow PMD entry. The page |
381 | * number in the shadow PMD is the page we just allocated. | ||
382 | */ | ||
339 | native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); | 383 | native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); |
340 | } | 384 | } |
341 | 385 | ||
342 | /* OK, now we look at the lower level in the Guest page table: keep its | 386 | /* |
343 | * address, because we might update it later. */ | 387 | * OK, now we look at the lower level in the Guest page table: keep its |
388 | * address, because we might update it later. | ||
389 | */ | ||
344 | gpte_ptr = gpte_addr(cpu, gpmd, vaddr); | 390 | gpte_ptr = gpte_addr(cpu, gpmd, vaddr); |
345 | #else | 391 | #else |
346 | /* OK, now we look at the lower level in the Guest page table: keep its | 392 | /* |
347 | * address, because we might update it later. */ | 393 | * OK, now we look at the lower level in the Guest page table: keep its |
394 | * address, because we might update it later. | ||
395 | */ | ||
348 | gpte_ptr = gpte_addr(cpu, gpgd, vaddr); | 396 | gpte_ptr = gpte_addr(cpu, gpgd, vaddr); |
349 | #endif | 397 | #endif |
398 | |||
399 | /* Read the actual PTE value. */ | ||
350 | gpte = lgread(cpu, gpte_ptr, pte_t); | 400 | gpte = lgread(cpu, gpte_ptr, pte_t); |
351 | 401 | ||
352 | /* If this page isn't in the Guest page tables, we can't page it in. */ | 402 | /* If this page isn't in the Guest page tables, we can't page it in. */ |
353 | if (!(pte_flags(gpte) & _PAGE_PRESENT)) | 403 | if (!(pte_flags(gpte) & _PAGE_PRESENT)) |
354 | return false; | 404 | return false; |
355 | 405 | ||
356 | /* Check they're not trying to write to a page the Guest wants | 406 | /* |
357 | * read-only (bit 2 of errcode == write). */ | 407 | * Check they're not trying to write to a page the Guest wants |
408 | * read-only (bit 2 of errcode == write). | ||
409 | */ | ||
358 | if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) | 410 | if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) |
359 | return false; | 411 | return false; |
360 | 412 | ||
@@ -362,8 +414,10 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
362 | if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) | 414 | if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) |
363 | return false; | 415 | return false; |
364 | 416 | ||
365 | /* Check that the Guest PTE flags are OK, and the page number is below | 417 | /* |
366 | * the pfn_limit (ie. not mapping the Launcher binary). */ | 418 | * Check that the Guest PTE flags are OK, and the page number is below |
419 | * the pfn_limit (ie. not mapping the Launcher binary). | ||
420 | */ | ||
367 | check_gpte(cpu, gpte); | 421 | check_gpte(cpu, gpte); |
368 | 422 | ||
369 | /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ | 423 | /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ |
@@ -373,29 +427,40 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
373 | 427 | ||
374 | /* Get the pointer to the shadow PTE entry we're going to set. */ | 428 | /* Get the pointer to the shadow PTE entry we're going to set. */ |
375 | spte = spte_addr(cpu, *spgd, vaddr); | 429 | spte = spte_addr(cpu, *spgd, vaddr); |
376 | /* If there was a valid shadow PTE entry here before, we release it. | 430 | |
377 | * This can happen with a write to a previously read-only entry. */ | 431 | /* |
432 | * If there was a valid shadow PTE entry here before, we release it. | ||
433 | * This can happen with a write to a previously read-only entry. | ||
434 | */ | ||
378 | release_pte(*spte); | 435 | release_pte(*spte); |
379 | 436 | ||
380 | /* If this is a write, we insist that the Guest page is writable (the | 437 | /* |
381 | * final arg to gpte_to_spte()). */ | 438 | * If this is a write, we insist that the Guest page is writable (the |
439 | * final arg to gpte_to_spte()). | ||
440 | */ | ||
382 | if (pte_dirty(gpte)) | 441 | if (pte_dirty(gpte)) |
383 | *spte = gpte_to_spte(cpu, gpte, 1); | 442 | *spte = gpte_to_spte(cpu, gpte, 1); |
384 | else | 443 | else |
385 | /* If this is a read, don't set the "writable" bit in the page | 444 | /* |
445 | * If this is a read, don't set the "writable" bit in the page | ||
386 | * table entry, even if the Guest says it's writable. That way | 446 | * table entry, even if the Guest says it's writable. That way |
387 | * we will come back here when a write does actually occur, so | 447 | * we will come back here when a write does actually occur, so |
388 | * we can update the Guest's _PAGE_DIRTY flag. */ | 448 | * we can update the Guest's _PAGE_DIRTY flag. |
449 | */ | ||
389 | native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); | 450 | native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); |
390 | 451 | ||
391 | /* Finally, we write the Guest PTE entry back: we've set the | 452 | /* |
392 | * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ | 453 | * Finally, we write the Guest PTE entry back: we've set the |
454 | * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. | ||
455 | */ | ||
393 | lgwrite(cpu, gpte_ptr, pte_t, gpte); | 456 | lgwrite(cpu, gpte_ptr, pte_t, gpte); |
394 | 457 | ||
395 | /* The fault is fixed, the page table is populated, the mapping | 458 | /* |
459 | * The fault is fixed, the page table is populated, the mapping | ||
396 | * manipulated, the result returned and the code complete. A small | 460 | * manipulated, the result returned and the code complete. A small |
397 | * delay and a trace of alliteration are the only indications the Guest | 461 | * delay and a trace of alliteration are the only indications the Guest |
398 | * has that a page fault occurred at all. */ | 462 | * has that a page fault occurred at all. |
463 | */ | ||
399 | return true; | 464 | return true; |
400 | } | 465 | } |
401 | 466 | ||
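For reference, the errcode bits demand_page() tests follow the x86 page-fault error code layout; a short sketch of the decoding (helper names are hypothetical):

/* x86 #PF error code: bit 0 = present, bit 1 = write, bit 2 = user. */
static inline int fault_was_present(int errcode) { return errcode & 1; }
static inline int fault_was_write(int errcode)   { return errcode & 2; }
static inline int fault_was_user(int errcode)    { return errcode & 4; }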
@@ -408,7 +473,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
408 | * mapped, so it's overkill. | 473 | * mapped, so it's overkill. |
409 | * | 474 | * |
410 | * This is a quick version which answers the question: is this virtual address | 475 | * This is a quick version which answers the question: is this virtual address |
411 | * mapped by the shadow page tables, and is it writable? */ | 476 | * mapped by the shadow page tables, and is it writable? |
477 | */ | ||
412 | static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) | 478 | static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) |
413 | { | 479 | { |
414 | pgd_t *spgd; | 480 | pgd_t *spgd; |
@@ -428,21 +494,26 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) | |||
428 | return false; | 494 | return false; |
429 | #endif | 495 | #endif |
430 | 496 | ||
431 | /* Check the flags on the pte entry itself: it must be present and | 497 | /* |
432 | * writable. */ | 498 | * Check the flags on the pte entry itself: it must be present and |
499 | * writable. | ||
500 | */ | ||
433 | flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr))); | 501 | flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr))); |
434 | 502 | ||
435 | return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); | 503 | return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); |
436 | } | 504 | } |
437 | 505 | ||
438 | /* So, when pin_stack_pages() asks us to pin a page, we check if it's already | 506 | /* |
507 | * So, when pin_stack_pages() asks us to pin a page, we check if it's already | ||
439 | * in the page tables, and if not, we call demand_page() with error code 2 | 508 | * in the page tables, and if not, we call demand_page() with error code 2 |
440 | * (meaning "write"). */ | 509 | * (meaning "write"). |
510 | */ | ||
441 | void pin_page(struct lg_cpu *cpu, unsigned long vaddr) | 511 | void pin_page(struct lg_cpu *cpu, unsigned long vaddr) |
442 | { | 512 | { |
443 | if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) | 513 | if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) |
444 | kill_guest(cpu, "bad stack page %#lx", vaddr); | 514 | kill_guest(cpu, "bad stack page %#lx", vaddr); |
445 | } | 515 | } |
516 | /*:*/ | ||
446 | 517 | ||
447 | #ifdef CONFIG_X86_PAE | 518 | #ifdef CONFIG_X86_PAE |
448 | static void release_pmd(pmd_t *spmd) | 519 | static void release_pmd(pmd_t *spmd) |
@@ -479,15 +550,21 @@ static void release_pgd(pgd_t *spgd) | |||
479 | } | 550 | } |
480 | 551 | ||
481 | #else /* !CONFIG_X86_PAE */ | 552 | #else /* !CONFIG_X86_PAE */ |
482 | /*H:450 If we chase down the release_pgd() code, it looks like this: */ | 553 | /*H:450 |
554 | * If we chase down the release_pgd() code, the non-PAE version looks like | ||
555 | * this. The PAE version is almost identical, but instead of calling | ||
556 | * release_pte it calls release_pmd(), which looks much like this. | ||
557 | */ | ||
483 | static void release_pgd(pgd_t *spgd) | 558 | static void release_pgd(pgd_t *spgd) |
484 | { | 559 | { |
485 | /* If the entry's not present, there's nothing to release. */ | 560 | /* If the entry's not present, there's nothing to release. */ |
486 | if (pgd_flags(*spgd) & _PAGE_PRESENT) { | 561 | if (pgd_flags(*spgd) & _PAGE_PRESENT) { |
487 | unsigned int i; | 562 | unsigned int i; |
488 | /* Converting the pfn to find the actual PTE page is easy: turn | 563 | /* |
564 | * Converting the pfn to find the actual PTE page is easy: turn | ||
489 | * the page number into a physical address, then convert to a | 565 | * the page number into a physical address, then convert to a |
490 | * virtual address (easy for kernel pages like this one). */ | 566 | * virtual address (easy for kernel pages like this one). |
567 | */ | ||
491 | pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); | 568 | pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); |
492 | /* For each entry in the page, we might need to release it. */ | 569 | /* For each entry in the page, we might need to release it. */ |
493 | for (i = 0; i < PTRS_PER_PTE; i++) | 570 | for (i = 0; i < PTRS_PER_PTE; i++) |
@@ -499,9 +576,12 @@ static void release_pgd(pgd_t *spgd) | |||
499 | } | 576 | } |
500 | } | 577 | } |
501 | #endif | 578 | #endif |
502 | /*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() | 579 | |
580 | /*H:445 | ||
581 | * We saw flush_user_mappings() twice: once from the flush_user_mappings() | ||
503 | * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. | 582 | * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. |
504 | * It simply releases every PTE page from 0 up to the Guest's kernel address. */ | 583 | * It simply releases every PTE page from 0 up to the Guest's kernel address. |
584 | */ | ||
505 | static void flush_user_mappings(struct lguest *lg, int idx) | 585 | static void flush_user_mappings(struct lguest *lg, int idx) |
506 | { | 586 | { |
507 | unsigned int i; | 587 | unsigned int i; |
@@ -510,10 +590,12 @@ static void flush_user_mappings(struct lguest *lg, int idx) | |||
510 | release_pgd(lg->pgdirs[idx].pgdir + i); | 590 | release_pgd(lg->pgdirs[idx].pgdir + i); |
511 | } | 591 | } |
512 | 592 | ||
513 | /*H:440 (v) Flushing (throwing away) page tables, | 593 | /*H:440 |
594 | * (v) Flushing (throwing away) page tables, | ||
514 | * | 595 | * |
515 | * The Guest has a hypercall to throw away the page tables: it's used when a | 596 | * The Guest has a hypercall to throw away the page tables: it's used when a |
516 | * large number of mappings have been changed. */ | 597 | * large number of mappings have been changed. |
598 | */ | ||
517 | void guest_pagetable_flush_user(struct lg_cpu *cpu) | 599 | void guest_pagetable_flush_user(struct lg_cpu *cpu) |
518 | { | 600 | { |
519 | /* Drop the userspace part of the current page table. */ | 601 | /* Drop the userspace part of the current page table. */ |
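As an aside for readers tracing this flush path: the shape of flush_user_mappings() is easy to model outside the kernel. The sketch below is a userspace approximation (the array size, PGDIR_SHIFT and the 0xC0000000 split are illustrative assumptions, not lguest's values on every config), but it shows the idea of releasing everything below pgd_index(kernel_address).

#include <stdio.h>

#define PTRS_PER_PGD_MODEL 1024
#define PGDIR_SHIFT 22                  /* non-PAE: each entry maps 4 MB */

static unsigned long pgdir[PTRS_PER_PGD_MODEL];

/* Drop every top-level entry below the Guest kernel boundary. */
static void flush_user_mappings_model(unsigned long kernel_address)
{
        unsigned int i;

        for (i = 0; i < (kernel_address >> PGDIR_SHIFT); i++)
                pgdir[i] = 0;           /* release_pgd() in the real code */
}

int main(void)
{
        pgdir[0] = pgdir[767] = pgdir[768] = 1;
        flush_user_mappings_model(0xC0000000UL);
        printf("user entry 0: %lu, kernel entry 768: %lu\n",
               pgdir[0], pgdir[768]);   /* prints 0 and 1 */
        return 0;
}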
@@ -551,9 +633,11 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) | |||
551 | return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); | 633 | return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); |
552 | } | 634 | } |
553 | 635 | ||
554 | /* We keep several page tables. This is a simple routine to find the page | 636 | /* |
637 | * We keep several page tables. This is a simple routine to find the page | ||
555 | * table (if any) corresponding to this top-level address the Guest has given | 638 | * table (if any) corresponding to this top-level address the Guest has given |
556 | * us. */ | 639 | * us. |
640 | */ | ||
557 | static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) | 641 | static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) |
558 | { | 642 | { |
559 | unsigned int i; | 643 | unsigned int i; |
@@ -563,9 +647,11 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) | |||
563 | return i; | 647 | return i; |
564 | } | 648 | } |
565 | 649 | ||
566 | /*H:435 And this is us, creating the new page directory. If we really do | 650 | /*H:435 |
651 | * And this is us, creating the new page directory. If we really do | ||
567 | * allocate a new one (and so the kernel parts are not there), we set | 652 | * allocate a new one (and so the kernel parts are not there), we set |
568 | * blank_pgdir. */ | 653 | * blank_pgdir. |
654 | */ | ||
569 | static unsigned int new_pgdir(struct lg_cpu *cpu, | 655 | static unsigned int new_pgdir(struct lg_cpu *cpu, |
570 | unsigned long gpgdir, | 656 | unsigned long gpgdir, |
571 | int *blank_pgdir) | 657 | int *blank_pgdir) |
@@ -575,8 +661,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, | |||
575 | pmd_t *pmd_table; | 661 | pmd_t *pmd_table; |
576 | #endif | 662 | #endif |
577 | 663 | ||
578 | /* We pick one entry at random to throw out. Choosing the Least | 664 | /* |
579 | * Recently Used might be better, but this is easy. */ | 665 | * We pick one entry at random to throw out. Choosing the Least |
666 | * Recently Used might be better, but this is easy. | ||
667 | */ | ||
580 | next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); | 668 | next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); |
581 | /* If it's never been allocated at all before, try now. */ | 669 | /* If it's never been allocated at all before, try now. */ |
582 | if (!cpu->lg->pgdirs[next].pgdir) { | 670 | if (!cpu->lg->pgdirs[next].pgdir) { |
@@ -587,8 +675,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, | |||
587 | next = cpu->cpu_pgd; | 675 | next = cpu->cpu_pgd; |
588 | else { | 676 | else { |
589 | #ifdef CONFIG_X86_PAE | 677 | #ifdef CONFIG_X86_PAE |
590 | /* In PAE mode, allocate a pmd page and populate the | 678 | /* |
591 | * last pgd entry. */ | 679 | * In PAE mode, allocate a pmd page and populate the |
680 | * last pgd entry. | ||
681 | */ | ||
592 | pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); | 682 | pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); |
593 | if (!pmd_table) { | 683 | if (!pmd_table) { |
594 | free_page((long)cpu->lg->pgdirs[next].pgdir); | 684 | free_page((long)cpu->lg->pgdirs[next].pgdir); |
@@ -598,8 +688,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, | |||
598 | set_pgd(cpu->lg->pgdirs[next].pgdir + | 688 | set_pgd(cpu->lg->pgdirs[next].pgdir + |
599 | SWITCHER_PGD_INDEX, | 689 | SWITCHER_PGD_INDEX, |
600 | __pgd(__pa(pmd_table) | _PAGE_PRESENT)); | 690 | __pgd(__pa(pmd_table) | _PAGE_PRESENT)); |
601 | /* This is a blank page, so there are no kernel | 691 | /* |
602 | * mappings: caller must map the stack! */ | 692 | * This is a blank page, so there are no kernel |
693 | * mappings: caller must map the stack! | ||
694 | */ | ||
603 | *blank_pgdir = 1; | 695 | *blank_pgdir = 1; |
604 | } | 696 | } |
605 | #else | 697 | #else |
@@ -615,19 +707,23 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, | |||
615 | return next; | 707 | return next; |
616 | } | 708 | } |
617 | 709 | ||
618 | /*H:430 (iv) Switching page tables | 710 | /*H:430 |
711 | * (iv) Switching page tables | ||
619 | * | 712 | * |
620 | * Now we've seen all the page table setting and manipulation, let's see | 713 | * Now we've seen all the page table setting and manipulation, let's see |
621 | * what happens when the Guest changes page tables (ie. changes the top-level | 714 | * what happens when the Guest changes page tables (ie. changes the top-level |
622 | * pgdir). This occurs on almost every context switch. */ | 715 | * pgdir). This occurs on almost every context switch. |
716 | */ | ||
623 | void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) | 717 | void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) |
624 | { | 718 | { |
625 | int newpgdir, repin = 0; | 719 | int newpgdir, repin = 0; |
626 | 720 | ||
627 | /* Look to see if we have this one already. */ | 721 | /* Look to see if we have this one already. */ |
628 | newpgdir = find_pgdir(cpu->lg, pgtable); | 722 | newpgdir = find_pgdir(cpu->lg, pgtable); |
629 | /* If not, we allocate or mug an existing one: if it's a fresh one, | 723 | /* |
630 | * repin gets set to 1. */ | 724 | * If not, we allocate or mug an existing one: if it's a fresh one, |
725 | * repin gets set to 1. | ||
726 | */ | ||
631 | if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) | 727 | if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) |
632 | newpgdir = new_pgdir(cpu, pgtable, &repin); | 728 | newpgdir = new_pgdir(cpu, pgtable, &repin); |
633 | /* Change the current pgd index to the new one. */ | 729 | /* Change the current pgd index to the new one. */ |
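The find-or-evict dance split across find_pgdir(), new_pgdir() and guest_new_pagetable() above condenses to a few lines of standalone C. Everything here (cache size, field layout, rand() standing in for random32()) is an illustrative stand-in for the real structures, not the kernel's code:

#include <stdio.h>
#include <stdlib.h>

#define NR_PGDIRS 4

static unsigned long cache[NR_PGDIRS]; /* Guest top-level of each slot */

/* Evict a random slot; a fresh slot has no stack mapped yet. */
static unsigned int new_pgdir_model(unsigned long gpgdir, int *blank)
{
        unsigned int next = rand() % NR_PGDIRS; /* random, not LRU */

        cache[next] = gpgdir;
        *blank = 1;
        return next;
}

/* Find the shadow for this top level, or make room for one. */
static unsigned int switch_pgdir_model(unsigned long gpgdir, int *repin)
{
        unsigned int i;

        *repin = 0;
        for (i = 0; i < NR_PGDIRS; i++) /* the find_pgdir() scan */
                if (cache[i] == gpgdir)
                        return i;
        return new_pgdir_model(gpgdir, repin);
}

int main(void)
{
        int repin;
        unsigned int idx = switch_pgdir_model(0x1000, &repin);

        printf("slot %u, repin=%d\n", idx, repin);      /* repin=1: miss */
        idx = switch_pgdir_model(0x1000, &repin);
        printf("slot %u, repin=%d\n", idx, repin);      /* repin=0: hit */
        return 0;
}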
@@ -637,9 +733,11 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) | |||
637 | pin_stack_pages(cpu); | 733 | pin_stack_pages(cpu); |
638 | } | 734 | } |
639 | 735 | ||
640 | /*H:470 Finally, a routine which throws away everything: all PGD entries in all | 736 | /*H:470 |
737 | * Finally, a routine which throws away everything: all PGD entries in all | ||
641 | * the shadow page tables, including the Guest's kernel mappings. This is used | 738 | * the shadow page tables, including the Guest's kernel mappings. This is used |
642 | * when we destroy the Guest. */ | 739 | * when we destroy the Guest. |
740 | */ | ||
643 | static void release_all_pagetables(struct lguest *lg) | 741 | static void release_all_pagetables(struct lguest *lg) |
644 | { | 742 | { |
645 | unsigned int i, j; | 743 | unsigned int i, j; |
@@ -656,8 +754,10 @@ static void release_all_pagetables(struct lguest *lg) | |||
656 | spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; | 754 | spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; |
657 | pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); | 755 | pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); |
658 | 756 | ||
659 | /* And release the pmd entries of that pmd page, | 757 | /* |
660 | * except for the switcher pmd. */ | 758 | * And release the pmd entries of that pmd page, |
759 | * except for the switcher pmd. | ||
760 | */ | ||
661 | for (k = 0; k < SWITCHER_PMD_INDEX; k++) | 761 | for (k = 0; k < SWITCHER_PMD_INDEX; k++) |
662 | release_pmd(&pmdpage[k]); | 762 | release_pmd(&pmdpage[k]); |
663 | #endif | 763 | #endif |
@@ -667,10 +767,12 @@ static void release_all_pagetables(struct lguest *lg) | |||
667 | } | 767 | } |
668 | } | 768 | } |
669 | 769 | ||
670 | /* We also throw away everything when a Guest tells us it's changed a kernel | 770 | /* |
771 | * We also throw away everything when a Guest tells us it's changed a kernel | ||
671 | * mapping. Since kernel mappings are in every page table, it's easiest to | 772 | * mapping. Since kernel mappings are in every page table, it's easiest to |
672 | * throw them all away. This traps the Guest in amber for a while as | 773 | * throw them all away. This traps the Guest in amber for a while as |
673 | * everything faults back in, but it's rare. */ | 774 | * everything faults back in, but it's rare. |
775 | */ | ||
674 | void guest_pagetable_clear_all(struct lg_cpu *cpu) | 776 | void guest_pagetable_clear_all(struct lg_cpu *cpu) |
675 | { | 777 | { |
676 | release_all_pagetables(cpu->lg); | 778 | release_all_pagetables(cpu->lg); |
@@ -678,15 +780,19 @@ void guest_pagetable_clear_all(struct lg_cpu *cpu) | |||
678 | pin_stack_pages(cpu); | 780 | pin_stack_pages(cpu); |
679 | } | 781 | } |
680 | /*:*/ | 782 | /*:*/ |
681 | /*M:009 Since we throw away all mappings when a kernel mapping changes, our | 783 | |
784 | /*M:009 | ||
785 | * Since we throw away all mappings when a kernel mapping changes, our | ||
682 | * performance sucks for guests using highmem. In fact, a guest with | 786 | * performance sucks for guests using highmem. In fact, a guest with |
683 | * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is | 787 | * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is |
684 | * usually slower than a Guest with less memory. | 788 | * usually slower than a Guest with less memory. |
685 | * | 789 | * |
686 | * This, of course, cannot be fixed. It would take some kind of... well, I | 790 | * This, of course, cannot be fixed. It would take some kind of... well, I |
687 | * don't know, but the term "puissant code-fu" comes to mind. :*/ | 791 | * don't know, but the term "puissant code-fu" comes to mind. |
792 | :*/ | ||
688 | 793 | ||
689 | /*H:420 This is the routine which actually sets the page table entry for the | 794 | /*H:420 |
795 | * This is the routine which actually sets the page table entry for the | ||
690 | * "idx"'th shadow page table. | 796 | * "idx"'th shadow page table. |
691 | * | 797 | * |
692 | * Normally, we can just throw out the old entry and replace it with 0: if they | 798 | * Normally, we can just throw out the old entry and replace it with 0: if they |
@@ -715,31 +821,36 @@ static void do_set_pte(struct lg_cpu *cpu, int idx, | |||
715 | spmd = spmd_addr(cpu, *spgd, vaddr); | 821 | spmd = spmd_addr(cpu, *spgd, vaddr); |
716 | if (pmd_flags(*spmd) & _PAGE_PRESENT) { | 822 | if (pmd_flags(*spmd) & _PAGE_PRESENT) { |
717 | #endif | 823 | #endif |
718 | /* Otherwise, we start by releasing | 824 | /* Otherwise, start by releasing the existing entry. */ |
719 | * the existing entry. */ | ||
720 | pte_t *spte = spte_addr(cpu, *spgd, vaddr); | 825 | pte_t *spte = spte_addr(cpu, *spgd, vaddr); |
721 | release_pte(*spte); | 826 | release_pte(*spte); |
722 | 827 | ||
723 | /* If they're setting this entry as dirty or accessed, | 828 | /* |
724 | * we might as well put that entry they've given us | 829 | * If they're setting this entry as dirty or accessed, |
725 | * in now. This shaves 10% off a | 830 | * we might as well put that entry they've given us in |
726 | * copy-on-write micro-benchmark. */ | 831 | * now. This shaves 10% off a copy-on-write |
832 | * micro-benchmark. | ||
833 | */ | ||
727 | if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { | 834 | if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { |
728 | check_gpte(cpu, gpte); | 835 | check_gpte(cpu, gpte); |
729 | native_set_pte(spte, | 836 | native_set_pte(spte, |
730 | gpte_to_spte(cpu, gpte, | 837 | gpte_to_spte(cpu, gpte, |
731 | pte_flags(gpte) & _PAGE_DIRTY)); | 838 | pte_flags(gpte) & _PAGE_DIRTY)); |
732 | } else | 839 | } else { |
733 | /* Otherwise kill it and we can demand_page() | 840 | /* |
734 | * it in later. */ | 841 | * Otherwise kill it and we can demand_page() |
842 | * it in later. | ||
843 | */ | ||
735 | native_set_pte(spte, __pte(0)); | 844 | native_set_pte(spte, __pte(0)); |
845 | } | ||
736 | #ifdef CONFIG_X86_PAE | 846 | #ifdef CONFIG_X86_PAE |
737 | } | 847 | } |
738 | #endif | 848 | #endif |
739 | } | 849 | } |
740 | } | 850 | } |
741 | 851 | ||
742 | /*H:410 Updating a PTE entry is a little trickier. | 852 | /*H:410 |
853 | * Updating a PTE entry is a little trickier. | ||
743 | * | 854 | * |
744 | * We keep track of several different page tables (the Guest uses one for each | 855 | * We keep track of several different page tables (the Guest uses one for each |
745 | * process, so it makes sense to cache at least a few). Each of these has | 856 | * process, so it makes sense to cache at least a few). Each of these has |
@@ -748,12 +859,15 @@ static void do_set_pte(struct lg_cpu *cpu, int idx, | |||
748 | * all the page tables, not just the current one. This is rare. | 859 | * all the page tables, not just the current one. This is rare. |
749 | * | 860 | * |
750 | * The benefit is that when we have to track a new page table, we can keep all | 861 | * The benefit is that when we have to track a new page table, we can keep all |
751 | * the kernel mappings. This speeds up context switch immensely. */ | 862 | * the kernel mappings. This speeds up context switch immensely. |
863 | */ | ||
752 | void guest_set_pte(struct lg_cpu *cpu, | 864 | void guest_set_pte(struct lg_cpu *cpu, |
753 | unsigned long gpgdir, unsigned long vaddr, pte_t gpte) | 865 | unsigned long gpgdir, unsigned long vaddr, pte_t gpte) |
754 | { | 866 | { |
755 | /* Kernel mappings must be changed on all top levels. Slow, but doesn't | 867 | /* |
756 | * happen often. */ | 868 | * Kernel mappings must be changed on all top levels. Slow, but doesn't |
869 | * happen often. | ||
870 | */ | ||
757 | if (vaddr >= cpu->lg->kernel_address) { | 871 | if (vaddr >= cpu->lg->kernel_address) { |
758 | unsigned int i; | 872 | unsigned int i; |
759 | for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) | 873 | for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) |
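The rule this function implements, kernel PTEs mirrored into every cached shadow and user PTEs applied only to the matching one, reads cleanly as a standalone model. The struct and constants below are invented for illustration:

#include <stdio.h>

#define NR_PGDIRS 4
#define KERNEL_ADDRESS 0xC0000000UL     /* illustrative boundary */

struct shadow { unsigned long gpgdir, last_pte; };
static struct shadow pgdirs[NR_PGDIRS];

static void guest_set_pte_model(unsigned long gpgdir,
                                unsigned long vaddr, unsigned long pte)
{
        unsigned int i;

        if (vaddr >= KERNEL_ADDRESS) {
                /* Kernel mapping: mirror into every cached shadow. */
                for (i = 0; i < NR_PGDIRS; i++)
                        if (pgdirs[i].gpgdir)
                                pgdirs[i].last_pte = pte;
        } else {
                /* User mapping: only the matching page table, if cached. */
                for (i = 0; i < NR_PGDIRS; i++)
                        if (pgdirs[i].gpgdir == gpgdir)
                                pgdirs[i].last_pte = pte;
        }
}

int main(void)
{
        pgdirs[0].gpgdir = 0x1000;
        pgdirs[1].gpgdir = 0x2000;
        guest_set_pte_model(0x1000, 0x00400000, 7);     /* user: slot 0 */
        guest_set_pte_model(0x1000, 0xC0100000, 9);     /* kernel: both */
        printf("%lu %lu\n", pgdirs[0].last_pte, pgdirs[1].last_pte); /* 9 9 */
        return 0;
}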
@@ -795,19 +909,25 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) | |||
795 | /* ... throw it away. */ | 909 | /* ... throw it away. */ |
796 | release_pgd(lg->pgdirs[pgdir].pgdir + idx); | 910 | release_pgd(lg->pgdirs[pgdir].pgdir + idx); |
797 | } | 911 | } |
912 | |||
798 | #ifdef CONFIG_X86_PAE | 913 | #ifdef CONFIG_X86_PAE |
914 | /* For setting a mid-level, we just throw everything away. It's easy. */ | ||
799 | void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx) | 915 | void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx) |
800 | { | 916 | { |
801 | guest_pagetable_clear_all(&lg->cpus[0]); | 917 | guest_pagetable_clear_all(&lg->cpus[0]); |
802 | } | 918 | } |
803 | #endif | 919 | #endif |
804 | 920 | ||
805 | /* Once we know how much memory we have we can construct simple identity | 921 | /*H:505 |
806 | * (which set virtual == physical) and linear mappings | 922 | * To get through boot, we construct simple identity page mappings (which |
807 | * which will get the Guest far enough into the boot to create its own. | 923 | * set virtual == physical) and linear mappings which will get the Guest far |
924 | * enough into the boot to create its own. The linear mapping means we | ||
925 | * simplify the Guest boot, but it makes assumptions about its PAGE_OFFSET, | ||
926 | * as you'll see. | ||
808 | * | 927 | * |
809 | * We lay them out of the way, just below the initrd (which is why we need to | 928 | * We lay them out of the way, just below the initrd (which is why we need to |
810 | * know its size here). */ | 929 | * know its size here). |
930 | */ | ||
811 | static unsigned long setup_pagetables(struct lguest *lg, | 931 | static unsigned long setup_pagetables(struct lguest *lg, |
812 | unsigned long mem, | 932 | unsigned long mem, |
813 | unsigned long initrd_size) | 933 | unsigned long initrd_size) |
@@ -825,8 +945,10 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
825 | unsigned int phys_linear; | 945 | unsigned int phys_linear; |
826 | #endif | 946 | #endif |
827 | 947 | ||
828 | /* We have mapped_pages frames to map, so we need | 948 | /* |
829 | * linear_pages page tables to map them. */ | 949 | * We have mapped_pages frames to map, so we need linear_pages page |
950 | * tables to map them. | ||
951 | */ | ||
830 | mapped_pages = mem / PAGE_SIZE; | 952 | mapped_pages = mem / PAGE_SIZE; |
831 | linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE; | 953 | linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE; |
832 | 954 | ||
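The linear_pages line above is the usual ceil-divide. Worked through with plausible non-PAE numbers (4 KB pages, 1024 PTEs per page; both are assumptions of this example):

#include <stdio.h>

int main(void)
{
        unsigned long mem = 64UL << 20;                 /* a 64 MB Guest */
        unsigned long page_size = 4096, ptrs_per_pte = 1024;
        unsigned long mapped_pages = mem / page_size;   /* 16384 frames */
        unsigned long linear_pages =
                (mapped_pages + ptrs_per_pte - 1) / ptrs_per_pte;

        printf("%lu frames need %lu PTE pages\n", mapped_pages,
               linear_pages);                           /* 16384 -> 16 */
        return 0;
}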
@@ -837,10 +959,16 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
837 | linear = (void *)pgdir - linear_pages * PAGE_SIZE; | 959 | linear = (void *)pgdir - linear_pages * PAGE_SIZE; |
838 | 960 | ||
839 | #ifdef CONFIG_X86_PAE | 961 | #ifdef CONFIG_X86_PAE |
962 | /* | ||
963 | * And the single mid page goes below that. We only use one, but | ||
964 | * that's enough to map 1G, which definitely gets us through boot. | ||
965 | */ | ||
840 | pmds = (void *)linear - PAGE_SIZE; | 966 | pmds = (void *)linear - PAGE_SIZE; |
841 | #endif | 967 | #endif |
842 | /* Linear mapping is easy: put every page's address into the | 968 | /* |
843 | * mapping in order. */ | 969 | * Linear mapping is easy: put every page's address into the |
970 | * mapping in order. | ||
971 | */ | ||
844 | for (i = 0; i < mapped_pages; i++) { | 972 | for (i = 0; i < mapped_pages; i++) { |
845 | pte_t pte; | 973 | pte_t pte; |
846 | pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER)); | 974 | pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER)); |
@@ -848,11 +976,14 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
848 | return -EFAULT; | 976 | return -EFAULT; |
849 | } | 977 | } |
850 | 978 | ||
851 | /* The top level points to the linear page table pages above. | ||
852 | * We setup the identity and linear mappings here. */ | ||
853 | #ifdef CONFIG_X86_PAE | 979 | #ifdef CONFIG_X86_PAE |
980 | /* | ||
981 | * Make the Guest PMD entries point to the corresponding place in the | ||
982 | * linear mapping (up to one page worth of PMD). | ||
983 | */ | ||
854 | for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD; | 984 | for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD; |
855 | i += PTRS_PER_PTE, j++) { | 985 | i += PTRS_PER_PTE, j++) { |
986 | /* FIXME: native_set_pmd is overkill here. */ | ||
856 | native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i) | 987 | native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i) |
857 | - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); | 988 | - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); |
858 | 989 | ||
@@ -860,18 +991,36 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
860 | return -EFAULT; | 991 | return -EFAULT; |
861 | } | 992 | } |
862 | 993 | ||
994 | /* One PGD entry, pointing to that PMD page. */ | ||
863 | set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT)); | 995 | set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT)); |
996 | /* Copy it in as the first PGD entry (ie. addresses 0-1G). */ | ||
864 | if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0) | 997 | if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0) |
865 | return -EFAULT; | 998 | return -EFAULT; |
999 | /* | ||
1000 | * And the third PGD entry (ie. addresses 3G-4G). | ||
1001 | * | ||
1002 | * FIXME: This assumes that PAGE_OFFSET for the Guest is 0xC0000000. | ||
1003 | */ | ||
866 | if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0) | 1004 | if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0) |
867 | return -EFAULT; | 1005 | return -EFAULT; |
868 | #else | 1006 | #else |
1007 | /* | ||
1008 | * The top level points to the linear page table pages above. | ||
1009 | * We set up the identity and linear mappings here. | ||
1010 | */ | ||
869 | phys_linear = (unsigned long)linear - mem_base; | 1011 | phys_linear = (unsigned long)linear - mem_base; |
870 | for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { | 1012 | for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { |
871 | pgd_t pgd; | 1013 | pgd_t pgd; |
1014 | /* | ||
1015 | * Create a PGD entry which points to the right part of the | ||
1016 | * linear PTE pages. | ||
1017 | */ | ||
872 | pgd = __pgd((phys_linear + i * sizeof(pte_t)) | | 1018 | pgd = __pgd((phys_linear + i * sizeof(pte_t)) | |
873 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); | 1019 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); |
874 | 1020 | ||
1021 | /* | ||
1022 | * Copy it into the PGD page at 0 and PAGE_OFFSET. | ||
1023 | */ | ||
875 | if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd)) | 1024 | if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd)) |
876 | || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET) | 1025 | || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET) |
877 | + i / PTRS_PER_PTE], | 1026 | + i / PTRS_PER_PTE], |
@@ -880,15 +1029,19 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
880 | } | 1029 | } |
881 | #endif | 1030 | #endif |
882 | 1031 | ||
883 | /* We return the top level (guest-physical) address: remember where | 1032 | /* |
884 | * this is. */ | 1033 | * We return the top level (guest-physical) address: we remember where |
1034 | * this is to write it into lguest_data when the Guest initializes. | ||
1035 | */ | ||
885 | return (unsigned long)pgdir - mem_base; | 1036 | return (unsigned long)pgdir - mem_base; |
886 | } | 1037 | } |
887 | 1038 | ||
888 | /*H:500 (vii) Setting up the page tables initially. | 1039 | /*H:500 |
1040 | * (vii) Setting up the page tables initially. | ||
889 | * | 1041 | * |
890 | * When a Guest is first created, the Launcher tells us where the toplevel of | 1042 | * When a Guest is first created, the Launcher tells us where the toplevel of |
891 | * its first page table is. We set some things up here: */ | 1043 | * its first page table is. We set some things up here: |
1044 | */ | ||
892 | int init_guest_pagetable(struct lguest *lg) | 1045 | int init_guest_pagetable(struct lguest *lg) |
893 | { | 1046 | { |
894 | u64 mem; | 1047 | u64 mem; |
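Here is the index arithmetic behind the pgdir[0] and pgdir[3] writes in the PAE branch, and the pgd_index(PAGE_OFFSET) copies in the non-PAE branch, assuming the 0xC0000000 PAGE_OFFSET that the FIXME above already calls out:

#include <stdio.h>

int main(void)
{
        unsigned long page_offset = 0xC0000000UL;       /* assumed, per FIXME */

        /* Non-PAE: 10-bit PGD index, each entry maps 4 MB. */
        printf("non-PAE pgd_index(PAGE_OFFSET) = %lu\n",
               page_offset >> 22);                      /* 768 */

        /* PAE: 2-bit top level, each entry maps 1 GB; 3G-4G is entry 3. */
        printf("PAE top-level index = %lu\n",
               page_offset >> 30);                      /* 3 */
        return 0;
}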
@@ -898,21 +1051,27 @@ int init_guest_pagetable(struct lguest *lg) | |||
898 | pgd_t *pgd; | 1051 | pgd_t *pgd; |
899 | pmd_t *pmd_table; | 1052 | pmd_t *pmd_table; |
900 | #endif | 1053 | #endif |
901 | /* Get the Guest memory size and the ramdisk size from the boot header | 1054 | /* |
902 | * located at lg->mem_base (Guest address 0). */ | 1055 | * Get the Guest memory size and the ramdisk size from the boot header |
1056 | * located at lg->mem_base (Guest address 0). | ||
1057 | */ | ||
903 | if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) | 1058 | if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) |
904 | || get_user(initrd_size, &boot->hdr.ramdisk_size)) | 1059 | || get_user(initrd_size, &boot->hdr.ramdisk_size)) |
905 | return -EFAULT; | 1060 | return -EFAULT; |
906 | 1061 | ||
907 | /* We start on the first shadow page table, and give it a blank PGD | 1062 | /* |
908 | * page. */ | 1063 | * We start on the first shadow page table, and give it a blank PGD |
1064 | * page. | ||
1065 | */ | ||
909 | lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size); | 1066 | lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size); |
910 | if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir)) | 1067 | if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir)) |
911 | return lg->pgdirs[0].gpgdir; | 1068 | return lg->pgdirs[0].gpgdir; |
912 | lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); | 1069 | lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); |
913 | if (!lg->pgdirs[0].pgdir) | 1070 | if (!lg->pgdirs[0].pgdir) |
914 | return -ENOMEM; | 1071 | return -ENOMEM; |
1072 | |||
915 | #ifdef CONFIG_X86_PAE | 1073 | #ifdef CONFIG_X86_PAE |
1074 | /* For PAE, we also create the initial mid-level. */ | ||
916 | pgd = lg->pgdirs[0].pgdir; | 1075 | pgd = lg->pgdirs[0].pgdir; |
917 | pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL); | 1076 | pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL); |
918 | if (!pmd_table) | 1077 | if (!pmd_table) |
@@ -921,27 +1080,33 @@ int init_guest_pagetable(struct lguest *lg) | |||
921 | set_pgd(pgd + SWITCHER_PGD_INDEX, | 1080 | set_pgd(pgd + SWITCHER_PGD_INDEX, |
922 | __pgd(__pa(pmd_table) | _PAGE_PRESENT)); | 1081 | __pgd(__pa(pmd_table) | _PAGE_PRESENT)); |
923 | #endif | 1082 | #endif |
1083 | |||
1084 | /* This is the current page table. */ | ||
924 | lg->cpus[0].cpu_pgd = 0; | 1085 | lg->cpus[0].cpu_pgd = 0; |
925 | return 0; | 1086 | return 0; |
926 | } | 1087 | } |
927 | 1088 | ||
928 | /* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */ | 1089 | /*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */ |
929 | void page_table_guest_data_init(struct lg_cpu *cpu) | 1090 | void page_table_guest_data_init(struct lg_cpu *cpu) |
930 | { | 1091 | { |
931 | /* We get the kernel address: above this is all kernel memory. */ | 1092 | /* We get the kernel address: above this is all kernel memory. */ |
932 | if (get_user(cpu->lg->kernel_address, | 1093 | if (get_user(cpu->lg->kernel_address, |
933 | &cpu->lg->lguest_data->kernel_address) | 1094 | &cpu->lg->lguest_data->kernel_address) |
934 | /* We tell the Guest that it can't use the top 2 or 4 MB | 1095 | /* |
935 | * of virtual addresses used by the Switcher. */ | 1096 | * We tell the Guest that it can't use the top 2 or 4 MB |
1097 | * of virtual addresses used by the Switcher. | ||
1098 | */ | ||
936 | || put_user(RESERVE_MEM * 1024 * 1024, | 1099 | || put_user(RESERVE_MEM * 1024 * 1024, |
937 | &cpu->lg->lguest_data->reserve_mem) | 1100 | &cpu->lg->lguest_data->reserve_mem) |
938 | || put_user(cpu->lg->pgdirs[0].gpgdir, | 1101 | || put_user(cpu->lg->pgdirs[0].gpgdir, |
939 | &cpu->lg->lguest_data->pgdir)) | 1102 | &cpu->lg->lguest_data->pgdir)) |
940 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); | 1103 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); |
941 | 1104 | ||
942 | /* In flush_user_mappings() we loop from 0 to | 1105 | /* |
1106 | * In flush_user_mappings() we loop from 0 to | ||
943 | * "pgd_index(lg->kernel_address)". This assumes it won't hit the | 1107 | * "pgd_index(lg->kernel_address)". This assumes it won't hit the |
944 | * Switcher mappings, so check that now. */ | 1108 | * Switcher mappings, so check that now. |
1109 | */ | ||
945 | #ifdef CONFIG_X86_PAE | 1110 | #ifdef CONFIG_X86_PAE |
946 | if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX && | 1111 | if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX && |
947 | pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX) | 1112 | pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX) |
@@ -964,12 +1129,14 @@ void free_guest_pagetable(struct lguest *lg) | |||
964 | free_page((long)lg->pgdirs[i].pgdir); | 1129 | free_page((long)lg->pgdirs[i].pgdir); |
965 | } | 1130 | } |
966 | 1131 | ||
967 | /*H:480 (vi) Mapping the Switcher when the Guest is about to run. | 1132 | /*H:480 |
1133 | * (vi) Mapping the Switcher when the Guest is about to run. | ||
968 | * | 1134 | * |
969 | * The Switcher and the two pages for this CPU need to be visible in the | 1135 | * The Switcher and the two pages for this CPU need to be visible in the |
970 | * Guest (and not the pages for other CPUs). We have the appropriate PTE pages | 1136 | * Guest (and not the pages for other CPUs). We have the appropriate PTE pages |
971 | * for each CPU already set up; we just need to hook them in now that we know which | 1137 | * for each CPU already set up; we just need to hook them in now that we know which |
972 | * Guest is about to run on this CPU. */ | 1138 | * Guest is about to run on this CPU. |
1139 | */ | ||
973 | void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) | 1140 | void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) |
974 | { | 1141 | { |
975 | pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); | 1142 | pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); |
@@ -980,30 +1147,38 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) | |||
980 | pmd_t switcher_pmd; | 1147 | pmd_t switcher_pmd; |
981 | pmd_t *pmd_table; | 1148 | pmd_t *pmd_table; |
982 | 1149 | ||
1150 | /* FIXME: native_set_pmd is overkill here. */ | ||
983 | native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> | 1151 | native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> |
984 | PAGE_SHIFT, PAGE_KERNEL_EXEC)); | 1152 | PAGE_SHIFT, PAGE_KERNEL_EXEC)); |
985 | 1153 | ||
1154 | /* Figure out where the pmd page is, by reading the PGD, and converting | ||
1155 | * it to a virtual address. */ | ||
986 | pmd_table = __va(pgd_pfn(cpu->lg-> | 1156 | pmd_table = __va(pgd_pfn(cpu->lg-> |
987 | pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) | 1157 | pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) |
988 | << PAGE_SHIFT); | 1158 | << PAGE_SHIFT); |
1159 | /* Now write it into the shadow page table. */ | ||
989 | native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd); | 1160 | native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd); |
990 | #else | 1161 | #else |
991 | pgd_t switcher_pgd; | 1162 | pgd_t switcher_pgd; |
992 | 1163 | ||
993 | /* Make the last PGD entry for this Guest point to the Switcher's PTE | 1164 | /* |
994 | * page for this CPU (with appropriate flags). */ | 1165 | * Make the last PGD entry for this Guest point to the Switcher's PTE |
1166 | * page for this CPU (with appropriate flags). | ||
1167 | */ | ||
995 | switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); | 1168 | switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); |
996 | 1169 | ||
997 | cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; | 1170 | cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; |
998 | 1171 | ||
999 | #endif | 1172 | #endif |
1000 | /* We also change the Switcher PTE page. When we're running the Guest, | 1173 | /* |
1174 | * We also change the Switcher PTE page. When we're running the Guest, | ||
1001 | * we want the Guest's "regs" page to appear where the first Switcher | 1175 | * we want the Guest's "regs" page to appear where the first Switcher |
1002 | * page for this CPU is. This is an optimization: when the Switcher | 1176 | * page for this CPU is. This is an optimization: when the Switcher |
1003 | * saves the Guest registers, it saves them into the first page of this | 1177 | * saves the Guest registers, it saves them into the first page of this |
1004 | * CPU's "struct lguest_pages": if we make sure the Guest's register | 1178 | * CPU's "struct lguest_pages": if we make sure the Guest's register |
1005 | * page is already mapped there, we don't have to copy them out | 1179 | * page is already mapped there, we don't have to copy them out |
1006 | * again. */ | 1180 | * again. |
1181 | */ | ||
1007 | pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; | 1182 | pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; |
1008 | native_set_pte(®s_pte, pfn_pte(pfn, PAGE_KERNEL)); | 1183 | native_set_pte(®s_pte, pfn_pte(pfn, PAGE_KERNEL)); |
1009 | native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], | 1184 | native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], |
@@ -1019,10 +1194,12 @@ static void free_switcher_pte_pages(void) | |||
1019 | free_page((long)switcher_pte_page(i)); | 1194 | free_page((long)switcher_pte_page(i)); |
1020 | } | 1195 | } |
1021 | 1196 | ||
1022 | /*H:520 Setting up the Switcher PTE page for given CPU is fairly easy, given | 1197 | /*H:520 |
1198 | * Setting up the Switcher PTE page for given CPU is fairly easy, given | ||
1023 | * the CPU number and the "struct page"s for the Switcher code itself. | 1199 | * the CPU number and the "struct page"s for the Switcher code itself. |
1024 | * | 1200 | * |
1025 | * Currently the Switcher is less than a page long, so "pages" is always 1. */ | 1201 | * Currently the Switcher is less than a page long, so "pages" is always 1. |
1202 | */ | ||
1026 | static __init void populate_switcher_pte_page(unsigned int cpu, | 1203 | static __init void populate_switcher_pte_page(unsigned int cpu, |
1027 | struct page *switcher_page[], | 1204 | struct page *switcher_page[], |
1028 | unsigned int pages) | 1205 | unsigned int pages) |
@@ -1043,13 +1220,16 @@ static __init void populate_switcher_pte_page(unsigned int cpu, | |||
1043 | native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]), | 1220 | native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]), |
1044 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW))); | 1221 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW))); |
1045 | 1222 | ||
1046 | /* The second page contains the "struct lguest_ro_state", and is | 1223 | /* |
1047 | * read-only. */ | 1224 | * The second page contains the "struct lguest_ro_state", and is |
1225 | * read-only. | ||
1226 | */ | ||
1048 | native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]), | 1227 | native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]), |
1049 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); | 1228 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); |
1050 | } | 1229 | } |
1051 | 1230 | ||
1052 | /* We've made it through the page table code. Perhaps our tired brains are | 1231 | /* |
1232 | * We've made it through the page table code. Perhaps our tired brains are | ||
1053 | * still processing the details, or perhaps we're simply glad it's over. | 1233 | * still processing the details, or perhaps we're simply glad it's over. |
1054 | * | 1234 | * |
1055 | * If nothing else, note that all this complexity in juggling shadow page tables | 1235 | * If nothing else, note that all this complexity in juggling shadow page tables |
@@ -1058,10 +1238,13 @@ static __init void populate_switcher_pte_page(unsigned int cpu, | |||
1058 | * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD | 1238 | * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD |
1059 | * have implemented shadow page table support directly into hardware. | 1239 | * have implemented shadow page table support directly into hardware. |
1060 | * | 1240 | * |
1061 | * There is just one file remaining in the Host. */ | 1241 | * There is just one file remaining in the Host. |
1242 | */ | ||
1062 | 1243 | ||
1063 | /*H:510 At boot or module load time, init_pagetables() allocates and populates | 1244 | /*H:510 |
1064 | * the Switcher PTE page for each CPU. */ | 1245 | * At boot or module load time, init_pagetables() allocates and populates |
1246 | * the Switcher PTE page for each CPU. | ||
1247 | */ | ||
1065 | __init int init_pagetables(struct page **switcher_page, unsigned int pages) | 1248 | __init int init_pagetables(struct page **switcher_page, unsigned int pages) |
1066 | { | 1249 | { |
1067 | unsigned int i; | 1250 | unsigned int i; |
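Before leaving page_tables.c, the regs-page optimization described in map_switcher_in_guest() deserves a tiny model: if the slot the Switcher saves registers into is the Guest's own register page, the save and the copy-out are the same write. Plain pointers stand in for the PTE plumbing here; this is a sketch of the idea, not the kernel's mechanism:

#include <stdio.h>

struct regs { unsigned long eax; };                     /* toy register set */

int main(void)
{
        struct regs guest_regs_page = { 0 };

        /* "Map" the Guest's regs page where the Switcher's slot is. */
        struct regs *switcher_save_slot = &guest_regs_page;

        switcher_save_slot->eax = 42;   /* the Switcher "saves" a register */
        printf("guest regs eax: %lu\n", guest_regs_page.eax);   /* 42 */
        return 0;
}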
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c index 482ed5a18750..951c57b0a7e0 100644 --- a/drivers/lguest/segments.c +++ b/drivers/lguest/segments.c | |||
@@ -1,4 +1,5 @@ | |||
1 | /*P:600 The x86 architecture has segments, which involve a table of descriptors | 1 | /*P:600 |
2 | * The x86 architecture has segments, which involve a table of descriptors | ||
2 | * which can be used to do funky things with virtual address interpretation. | 3 | * which can be used to do funky things with virtual address interpretation. |
3 | * We originally used to use segments so the Guest couldn't alter the | 4 | * We originally used to use segments so the Guest couldn't alter the |
4 | * Guest<->Host Switcher, and then we had to trim Guest segments, and restore | 5 | * Guest<->Host Switcher, and then we had to trim Guest segments, and restore |
@@ -8,7 +9,8 @@ | |||
8 | * | 9 | * |
9 | * In these modern times, the segment handling code consists of simple sanity | 10 | * In these modern times, the segment handling code consists of simple sanity |
10 | * checks, and the worst you'll experience reading this code is butterfly-rash | 11 | * checks, and the worst you'll experience reading this code is butterfly-rash |
11 | * from frolicking through its parklike serenity. :*/ | 12 | * from frolicking through its parklike serenity. |
13 | :*/ | ||
12 | #include "lg.h" | 14 | #include "lg.h" |
13 | 15 | ||
14 | /*H:600 | 16 | /*H:600 |
@@ -41,10 +43,12 @@ | |||
41 | * begin. | 43 | * begin. |
42 | */ | 44 | */ |
43 | 45 | ||
44 | /* There are several entries we don't let the Guest set. The TSS entry is the | 46 | /* |
47 | * There are several entries we don't let the Guest set. The TSS entry is the | ||
45 | * "Task State Segment" which controls all kinds of delicate things. The | 48 | * "Task State Segment" which controls all kinds of delicate things. The |
46 | * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and | 49 | * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and |
47 | * the Guest can't be trusted to deal with double faults. */ | 50 | * the Guest can't be trusted to deal with double faults. |
51 | */ | ||
48 | static bool ignored_gdt(unsigned int num) | 52 | static bool ignored_gdt(unsigned int num) |
49 | { | 53 | { |
50 | return (num == GDT_ENTRY_TSS | 54 | return (num == GDT_ENTRY_TSS |
@@ -53,42 +57,52 @@ static bool ignored_gdt(unsigned int num) | |||
53 | || num == GDT_ENTRY_DOUBLEFAULT_TSS); | 57 | || num == GDT_ENTRY_DOUBLEFAULT_TSS); |
54 | } | 58 | } |
55 | 59 | ||
56 | /*H:630 Once the Guest gave us new GDT entries, we fix them up a little. We | 60 | /*H:630 |
61 | * Once the Guest gave us new GDT entries, we fix them up a little. We | ||
57 | * don't care if they're invalid: the worst that can happen is a General | 62 | * don't care if they're invalid: the worst that can happen is a General |
58 | * Protection Fault in the Switcher when it restores a Guest segment register | 63 | * Protection Fault in the Switcher when it restores a Guest segment register |
59 | * which tries to use that entry. Then we kill the Guest for causing such a | 64 | * which tries to use that entry. Then we kill the Guest for causing such a |
60 | * mess: the message will be "unhandled trap 256". */ | 65 | * mess: the message will be "unhandled trap 256". |
66 | */ | ||
61 | static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end) | 67 | static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end) |
62 | { | 68 | { |
63 | unsigned int i; | 69 | unsigned int i; |
64 | 70 | ||
65 | for (i = start; i < end; i++) { | 71 | for (i = start; i < end; i++) { |
66 | /* We never copy these ones to real GDT, so we don't care what | 72 | /* |
67 | * they say */ | 73 | * We never copy these ones to real GDT, so we don't care what |
74 | * they say | ||
75 | */ | ||
68 | if (ignored_gdt(i)) | 76 | if (ignored_gdt(i)) |
69 | continue; | 77 | continue; |
70 | 78 | ||
71 | /* Segment descriptors contain a privilege level: the Guest is | 79 | /* |
80 | * Segment descriptors contain a privilege level: the Guest is | ||
72 | * sometimes careless and leaves this as 0, even though it's | 81 | * sometimes careless and leaves this as 0, even though it's |
73 | * running at privilege level 1. If so, we fix it here. */ | 82 | * running at privilege level 1. If so, we fix it here. |
83 | */ | ||
74 | if ((cpu->arch.gdt[i].b & 0x00006000) == 0) | 84 | if ((cpu->arch.gdt[i].b & 0x00006000) == 0) |
75 | cpu->arch.gdt[i].b |= (GUEST_PL << 13); | 85 | cpu->arch.gdt[i].b |= (GUEST_PL << 13); |
76 | 86 | ||
77 | /* Each descriptor has an "accessed" bit. If we don't set it | 87 | /* |
88 | * Each descriptor has an "accessed" bit. If we don't set it | ||
78 | * now, the CPU will try to set it when the Guest first loads | 89 | * now, the CPU will try to set it when the Guest first loads |
79 | * that entry into a segment register. But the GDT isn't | 90 | * that entry into a segment register. But the GDT isn't |
80 | * writable by the Guest, so bad things can happen. */ | 91 | * writable by the Guest, so bad things can happen. |
92 | */ | ||
81 | cpu->arch.gdt[i].b |= 0x00000100; | 93 | cpu->arch.gdt[i].b |= 0x00000100; |
82 | } | 94 | } |
83 | } | 95 | } |
84 | 96 | ||
85 | /*H:610 Like the IDT, we never simply use the GDT the Guest gives us. We keep | 97 | /*H:610 |
98 | * Like the IDT, we never simply use the GDT the Guest gives us. We keep | ||
86 | * a GDT for each CPU, and copy across the Guest's entries each time we want to | 99 | * a GDT for each CPU, and copy across the Guest's entries each time we want to |
87 | * run the Guest on that CPU. | 100 | * run the Guest on that CPU. |
88 | * | 101 | * |
89 | * This routine is called at boot or modprobe time for each CPU to set up the | 102 | * This routine is called at boot or modprobe time for each CPU to set up the |
90 | * constant GDT entries: the ones which are the same no matter what Guest we're | 103 | * constant GDT entries: the ones which are the same no matter what Guest we're |
91 | * running. */ | 104 | * running. |
105 | */ | ||
92 | void setup_default_gdt_entries(struct lguest_ro_state *state) | 106 | void setup_default_gdt_entries(struct lguest_ro_state *state) |
93 | { | 107 | { |
94 | struct desc_struct *gdt = state->guest_gdt; | 108 | struct desc_struct *gdt = state->guest_gdt; |
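The two bit-twiddles in fixup_gdt_table() are easy to check in isolation. This sketch copies the masks straight from the hunk above; the sample descriptor value is made up for the demonstration:

#include <stdio.h>

#define GUEST_PL 1                      /* Guest runs at privilege level 1 */

static unsigned int fixup_descriptor_hi(unsigned int b)
{
        if ((b & 0x00006000) == 0)      /* privilege field left at 0? */
                b |= (GUEST_PL << 13);
        b |= 0x00000100;                /* pre-set the "accessed" bit */
        return b;
}

int main(void)
{
        /* A made-up DPL-0, not-accessed code descriptor high word. */
        printf("0x%08x\n", fixup_descriptor_hi(0x00C09A00U));
        /* prints 0x00c0bb00: DPL forced to 1, accessed bit now set */
        return 0;
}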
@@ -98,30 +112,37 @@ void setup_default_gdt_entries(struct lguest_ro_state *state) | |||
98 | gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; | 112 | gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; |
99 | gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; | 113 | gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; |
100 | 114 | ||
101 | /* The TSS segment refers to the TSS entry for this particular CPU. | 115 | /* |
116 | * The TSS segment refers to the TSS entry for this particular CPU. | ||
102 | * Forgive the magic flags: the 0x8900 means the entry is Present, it's | 117 | * Forgive the magic flags: the 0x8900 means the entry is Present, it's |
103 | * privilege level 0 Available 386 TSS system segment, and the 0x67 | 118 | * privilege level 0 Available 386 TSS system segment, and the 0x67 |
104 | * means Saturn is eclipsed by Mercury in the twelfth house. */ | 119 | * means Saturn is eclipsed by Mercury in the twelfth house. |
120 | */ | ||
105 | gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16); | 121 | gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16); |
106 | gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000) | 122 | gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000) |
107 | | ((tss >> 16) & 0x000000FF); | 123 | | ((tss >> 16) & 0x000000FF); |
108 | } | 124 | } |
109 | 125 | ||
110 | /* This routine sets up the initial Guest GDT for booting. All entries start | 126 | /* |
111 | * as 0 (unusable). */ | 127 | * This routine sets up the initial Guest GDT for booting. All entries start |
128 | * as 0 (unusable). | ||
129 | */ | ||
112 | void setup_guest_gdt(struct lg_cpu *cpu) | 130 | void setup_guest_gdt(struct lg_cpu *cpu) |
113 | { | 131 | { |
114 | /* Start with full 0-4G segments... */ | 132 | /* |
133 | * Start with full 0-4G segments...except the Guest is allowed to use | ||
134 | * them, so set the privilege level appropriately in the flags. | ||
135 | */ | ||
115 | cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; | 136 | cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; |
116 | cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; | 137 | cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; |
117 | /* ...except the Guest is allowed to use them, so set the privilege | ||
118 | * level appropriately in the flags. */ | ||
119 | cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13); | 138 | cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13); |
120 | cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); | 139 | cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); |
121 | } | 140 | } |
122 | 141 | ||
123 | /*H:650 An optimization of copy_gdt(), for just the three "thread-local storage" | 142 | /*H:650 |
124 | * entries. | 143 | * An optimization of copy_gdt(), for just the three "thread-local storage" |
144 | * entries. | ||
145 | */ | ||
125 | void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) | 146 | void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) |
126 | { | 147 | { |
127 | unsigned int i; | 148 | unsigned int i; |
@@ -130,26 +151,34 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) | |||
130 | gdt[i] = cpu->arch.gdt[i]; | 151 | gdt[i] = cpu->arch.gdt[i]; |
131 | } | 152 | } |
132 | 153 | ||
133 | /*H:640 When the Guest is run on a different CPU, or the GDT entries have | 154 | /*H:640 |
134 | * changed, copy_gdt() is called to copy the Guest's GDT entries across to this | 155 | * When the Guest is run on a different CPU, or the GDT entries have changed, |
135 | * CPU's GDT. */ | 156 | * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's |
157 | * GDT. | ||
158 | */ | ||
136 | void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt) | 159 | void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt) |
137 | { | 160 | { |
138 | unsigned int i; | 161 | unsigned int i; |
139 | 162 | ||
140 | /* The default entries from setup_default_gdt_entries() are not | 163 | /* |
141 | * replaced. See ignored_gdt() above. */ | 164 | * The default entries from setup_default_gdt_entries() are not |
165 | * replaced. See ignored_gdt() above. | ||
166 | */ | ||
142 | for (i = 0; i < GDT_ENTRIES; i++) | 167 | for (i = 0; i < GDT_ENTRIES; i++) |
143 | if (!ignored_gdt(i)) | 168 | if (!ignored_gdt(i)) |
144 | gdt[i] = cpu->arch.gdt[i]; | 169 | gdt[i] = cpu->arch.gdt[i]; |
145 | } | 170 | } |
146 | 171 | ||
147 | /*H:620 This is where the Guest asks us to load a new GDT entry | 172 | /*H:620 |
148 | * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. */ | 173 | * This is where the Guest asks us to load a new GDT entry |
174 | * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. | ||
175 | */ | ||
149 | void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) | 176 | void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) |
150 | { | 177 | { |
151 | /* We assume the Guest has the same number of GDT entries as the | 178 | /* |
152 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ | 179 | * We assume the Guest has the same number of GDT entries as the |
180 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. | ||
181 | */ | ||
153 | if (num >= ARRAY_SIZE(cpu->arch.gdt)) | 182 | if (num >= ARRAY_SIZE(cpu->arch.gdt)) |
154 | kill_guest(cpu, "too many gdt entries %i", num); | 183 | kill_guest(cpu, "too many gdt entries %i", num); |
155 | 184 | ||
@@ -157,15 +186,19 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) | |||
157 | cpu->arch.gdt[num].a = lo; | 186 | cpu->arch.gdt[num].a = lo; |
158 | cpu->arch.gdt[num].b = hi; | 187 | cpu->arch.gdt[num].b = hi; |
159 | fixup_gdt_table(cpu, num, num+1); | 188 | fixup_gdt_table(cpu, num, num+1); |
160 | /* Mark that the GDT changed so the core knows it has to copy it again, | 189 | /* |
161 | * even if the Guest is run on the same CPU. */ | 190 | * Mark that the GDT changed so the core knows it has to copy it again, |
191 | * even if the Guest is run on the same CPU. | ||
192 | */ | ||
162 | cpu->changed |= CHANGED_GDT; | 193 | cpu->changed |= CHANGED_GDT; |
163 | } | 194 | } |
164 | 195 | ||
165 | /* This is the fast-track version for just changing the three TLS entries. | 196 | /* |
197 | * This is the fast-track version for just changing the three TLS entries. | ||
166 | * Remember that this happens on every context switch, so it's worth | 198 | * Remember that this happens on every context switch, so it's worth |
167 | * optimizing. But wouldn't it be neater to have a single hypercall to cover | 199 | * optimizing. But wouldn't it be neater to have a single hypercall to cover |
168 | * both cases? */ | 200 | * both cases? |
201 | */ | ||
169 | void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) | 202 | void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) |
170 | { | 203 | { |
171 | struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN]; | 204 | struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN]; |
@@ -175,7 +208,6 @@ void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) | |||
175 | /* Note that just the TLS entries have changed. */ | 208 | /* Note that just the TLS entries have changed. */ |
176 | cpu->changed |= CHANGED_GDT_TLS; | 209 | cpu->changed |= CHANGED_GDT_TLS; |
177 | } | 210 | } |
178 | /*:*/ | ||
179 | 211 | ||
180 | /*H:660 | 212 | /*H:660 |
181 | * With this, we have finished the Host. | 213 | * With this, we have finished the Host. |
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index eaf722fe309a..6ae388849a3b 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c | |||
@@ -17,13 +17,15 @@ | |||
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 | */ | 19 | */ |
20 | /*P:450 This file contains the x86-specific lguest code. It used to be all | 20 | /*P:450 |
21 | * This file contains the x86-specific lguest code. It used to be all | ||
21 | * mixed in with drivers/lguest/core.c but several foolhardy code slashers | 22 | * mixed in with drivers/lguest/core.c but several foolhardy code slashers |
22 | * wrestled most of the dependencies out to here in preparation for porting | 23 | * wrestled most of the dependencies out to here in preparation for porting |
23 | * lguest to other architectures (see what I mean by foolhardy?). | 24 | * lguest to other architectures (see what I mean by foolhardy?). |
24 | * | 25 | * |
25 | * This also contains a couple of non-obvious setup and teardown pieces which | 26 | * This also contains a couple of non-obvious setup and teardown pieces which |
26 | * were implemented after days of debugging pain. :*/ | 27 | * were implemented after days of debugging pain. |
28 | :*/ | ||
27 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
28 | #include <linux/start_kernel.h> | 30 | #include <linux/start_kernel.h> |
29 | #include <linux/string.h> | 31 | #include <linux/string.h> |
@@ -82,25 +84,33 @@ static DEFINE_PER_CPU(struct lg_cpu *, last_cpu); | |||
82 | */ | 84 | */ |
83 | static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) | 85 | static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) |
84 | { | 86 | { |
85 | /* Copying all this data can be quite expensive. We usually run the | 87 | /* |
88 | * Copying all this data can be quite expensive. We usually run the | ||
86 | * same Guest we ran last time (and that Guest hasn't run anywhere else | 89 | * same Guest we ran last time (and that Guest hasn't run anywhere else |
87 | * meanwhile). If that's not the case, we pretend everything in the | 90 | * meanwhile). If that's not the case, we pretend everything in the |
88 | * Guest has changed. */ | 91 | * Guest has changed. |
92 | */ | ||
89 | if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) { | 93 | if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) { |
90 | __get_cpu_var(last_cpu) = cpu; | 94 | __get_cpu_var(last_cpu) = cpu; |
91 | cpu->last_pages = pages; | 95 | cpu->last_pages = pages; |
92 | cpu->changed = CHANGED_ALL; | 96 | cpu->changed = CHANGED_ALL; |
93 | } | 97 | } |
94 | 98 | ||
95 | /* These copies are pretty cheap, so we do them unconditionally: */ | 99 | /* |
96 | /* Save the current Host top-level page directory. */ | 100 | * These copies are pretty cheap, so we do them unconditionally:
101 | * Save the current Host top-level page directory. |
102 | */ | ||
97 | pages->state.host_cr3 = __pa(current->mm->pgd); | 103 | pages->state.host_cr3 = __pa(current->mm->pgd); |
98 | /* Set up the Guest's page tables to see this CPU's pages (and no | 104 | /* |
99 | * other CPU's pages). */ | 105 | * Set up the Guest's page tables to see this CPU's pages (and no |
106 | * other CPU's pages). | ||
107 | */ | ||
100 | map_switcher_in_guest(cpu, pages); | 108 | map_switcher_in_guest(cpu, pages); |
101 | /* Set up the two "TSS" members which tell the CPU what stack to use | 109 | /* |
110 | * Set up the two "TSS" members which tell the CPU what stack to use | ||
102 | * for traps which do directly into the Guest (ie. traps at privilege | 111 | * for traps which do directly into the Guest (ie. traps at privilege |
103 | * level 1). */ | 112 | * level 1). |
113 | */ | ||
104 | pages->state.guest_tss.sp1 = cpu->esp1; | 114 | pages->state.guest_tss.sp1 = cpu->esp1; |
105 | pages->state.guest_tss.ss1 = cpu->ss1; | 115 | pages->state.guest_tss.ss1 = cpu->ss1; |
106 | 116 | ||
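The last_cpu guard at the top of copy_in_guest_info() is a general pattern: remember the last consumer, and invalidate all cached state only when it changes. A minimal model (CHANGED_ALL is an illustrative value, not the kernel's):

#include <stdio.h>

#define CHANGED_ALL ~0u                 /* illustrative flag value */

static const void *last_cpu;            /* per-CPU in the real code */

static unsigned int note_guest(const void *cpu, unsigned int changed)
{
        if (last_cpu != cpu) {          /* different Guest than last run? */
                last_cpu = cpu;
                changed = CHANGED_ALL;  /* assume every cached copy is stale */
        }
        return changed;
}

int main(void)
{
        int a, b;

        printf("0x%x\n", note_guest(&a, 0));    /* 0xffffffff: first run */
        printf("0x%x\n", note_guest(&a, 0x2));  /* 0x2: same Guest, cheap */
        printf("0x%x\n", note_guest(&b, 0));    /* 0xffffffff: new Guest */
        return 0;
}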
@@ -125,97 +135,126 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages) | |||
125 | /* This is a dummy value we need for GCC's sake. */ | 135 | /* This is a dummy value we need for GCC's sake. */ |
126 | unsigned int clobber; | 136 | unsigned int clobber; |
127 | 137 | ||
128 | /* Copy the guest-specific information into this CPU's "struct | 138 | /* |
129 | * lguest_pages". */ | 139 | * Copy the guest-specific information into this CPU's "struct |
140 | * lguest_pages". | ||
141 | */ | ||
130 | copy_in_guest_info(cpu, pages); | 142 | copy_in_guest_info(cpu, pages); |
131 | 143 | ||
132 | /* Set the trap number to 256 (impossible value). If we fault while | 144 | /* |
145 | * Set the trap number to 256 (impossible value). If we fault while | ||
133 | * switching to the Guest (bad segment registers or bug), this will | 146 | * switching to the Guest (bad segment registers or bug), this will |
134 | * cause us to abort the Guest. */ | 147 | * cause us to abort the Guest. |
148 | */ | ||
135 | cpu->regs->trapnum = 256; | 149 | cpu->regs->trapnum = 256; |
136 | 150 | ||
137 | /* Now: we push the "eflags" register on the stack, then do an "lcall". | 151 | /* |
152 | * Now: we push the "eflags" register on the stack, then do an "lcall". | ||
138 | * This is how we change from using the kernel code segment to using | 153 | * This is how we change from using the kernel code segment to using |
139 | * the dedicated lguest code segment, as well as jumping into the | 154 | * the dedicated lguest code segment, as well as jumping into the |
140 | * Switcher. | 155 | * Switcher. |
141 | * | 156 | * |
142 | * The lcall also pushes the old code segment (KERNEL_CS) onto the | 157 | * The lcall also pushes the old code segment (KERNEL_CS) onto the |
143 | * stack, then the address of this call. This stack layout happens to | 158 | * stack, then the address of this call. This stack layout happens to |
144 | * exactly match the stack layout created by an interrupt... */ | 159 | * exactly match the stack layout created by an interrupt... |
160 | */ | ||
145 | asm volatile("pushf; lcall *lguest_entry" | 161 | asm volatile("pushf; lcall *lguest_entry" |
146 | /* This is how we tell GCC that %eax ("a") and %ebx ("b") | 162 | /* |
147 | * are changed by this routine. The "=" means output. */ | 163 | * This is how we tell GCC that %eax ("a") and %ebx ("b") |
164 | * are changed by this routine. The "=" means output. | ||
165 | */ | ||
148 | : "=a"(clobber), "=b"(clobber) | 166 | : "=a"(clobber), "=b"(clobber) |
149 | /* %eax contains the pages pointer. ("0" refers to the | 167 | /* |
168 | * %eax contains the pages pointer. ("0" refers to the | ||
150 | * 0-th argument above, ie "a"). %ebx contains the | 169 | * 0-th argument above, ie "a"). %ebx contains the |
151 | * physical address of the Guest's top-level page | 170 | * physical address of the Guest's top-level page |
152 | * directory. */ | 171 | * directory. |
172 | */ | ||
153 | : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)) | 173 | : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)) |
154 | /* We tell gcc that all these registers could change, | 174 | /* |
175 | * We tell gcc that all these registers could change, | ||
155 | * which means we don't have to save and restore them in | 176 | * which means we don't have to save and restore them in |
156 | * the Switcher. */ | 177 | * the Switcher. |
178 | */ | ||
157 | : "memory", "%edx", "%ecx", "%edi", "%esi"); | 179 | : "memory", "%edx", "%ecx", "%edi", "%esi"); |
158 | } | 180 | } |
159 | /*:*/ | 181 | /*:*/ |
160 | 182 | ||
161 | /*M:002 There are hooks in the scheduler which we can register to tell when we | 183 | /*M:002 |
184 | * There are hooks in the scheduler which we can register to tell when we | ||
162 | * get kicked off the CPU (preempt_notifier_register()). This would allow us | 185 | * get kicked off the CPU (preempt_notifier_register()). This would allow us |
163 | * to lazily disable SYSENTER which would regain some performance, and should | 186 | * to lazily disable SYSENTER which would regain some performance, and should |
164 | * also simplify copy_in_guest_info(). Note that we'd still need to restore | 187 | * also simplify copy_in_guest_info(). Note that we'd still need to restore |
165 | * things when we exit to Launcher userspace, but that's fairly easy. | 188 | * things when we exit to Launcher userspace, but that's fairly easy. |
166 | * | 189 | * |
167 | * We could also try using this hooks for PGE, but that might be too expensive. | 190 | * We could also try using these hooks for PGE, but that might be too expensive. |
168 | * | 191 | * |
169 | * The hooks were designed for KVM, but we can also put them to good use. :*/ | 192 | * The hooks were designed for KVM, but we can also put them to good use. |
193 | :*/ | ||
170 | 194 | ||
171 | /*H:040 This is the i386-specific code to set up and run the Guest. Interrupts | 195 | /*H:040 |
172 | * are disabled: we own the CPU. | 196 | * This is the i386-specific code to set up and run the Guest. Interrupts |
197 | * are disabled: we own the CPU. | ||
198 | */ | ||
173 | void lguest_arch_run_guest(struct lg_cpu *cpu) | 199 | void lguest_arch_run_guest(struct lg_cpu *cpu) |
174 | { | 200 | { |
175 | /* Remember the awfully-named TS bit? If the Guest has asked to set it, | 201 | /* |
202 | * Remember the awfully-named TS bit? If the Guest has asked to set it, | ||
176 | * we set it now, so we can trap and pass that trap to the Guest if it | 203 | * we set it now, so we can trap and pass that trap to the Guest if it |
177 | * uses the FPU. */ | 204 | * uses the FPU. |
205 | */ | ||
178 | if (cpu->ts) | 206 | if (cpu->ts) |
179 | unlazy_fpu(current); | 207 | unlazy_fpu(current); |
180 | 208 | ||
181 | /* SYSENTER is an optimized way of doing system calls. We can't allow | 209 | /* |
210 | * SYSENTER is an optimized way of doing system calls. We can't allow | ||
182 | * it because it always jumps to privilege level 0. A normal Guest | 211 | * it because it always jumps to privilege level 0. A normal Guest |
183 | * won't try it because we don't advertise it in CPUID, but a malicious | 212 | * won't try it because we don't advertise it in CPUID, but a malicious |
184 | * Guest (or malicious Guest userspace program) could, so we tell the | 213 | * Guest (or malicious Guest userspace program) could, so we tell the |
185 | * CPU to disable it before running the Guest. */ | 214 | * CPU to disable it before running the Guest. |
215 | */ | ||
186 | if (boot_cpu_has(X86_FEATURE_SEP)) | 216 | if (boot_cpu_has(X86_FEATURE_SEP)) |
187 | wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); | 217 | wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); |
188 | 218 | ||
189 | /* Now we actually run the Guest. It will return when something | 219 | /* |
220 | * Now we actually run the Guest. It will return when something | ||
190 | * interesting happens, and we can examine its registers to see what it | 221 | * interesting happens, and we can examine its registers to see what it |
191 | * was doing. */ | 222 | * was doing. |
223 | */ | ||
192 | run_guest_once(cpu, lguest_pages(raw_smp_processor_id())); | 224 | run_guest_once(cpu, lguest_pages(raw_smp_processor_id())); |
193 | 225 | ||
194 | /* Note that the "regs" structure contains two extra entries which are | 226 | /* |
227 | * Note that the "regs" structure contains two extra entries which are | ||
195 | * not really registers: a trap number which says what interrupt or | 228 | * not really registers: a trap number which says what interrupt or |
196 | * trap made the switcher code come back, and an error code which some | 229 | * trap made the switcher code come back, and an error code which some |
197 | * traps set. */ | 230 | * traps set. |
231 | */ | ||
198 | 232 | ||
199 | /* Restore SYSENTER if it's supposed to be on. */ | 233 | /* Restore SYSENTER if it's supposed to be on. */ |
200 | if (boot_cpu_has(X86_FEATURE_SEP)) | 234 | if (boot_cpu_has(X86_FEATURE_SEP)) |
201 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); | 235 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); |
202 | 236 | ||
203 | /* If the Guest page faulted, then the cr2 register will tell us the | 237 | /* |
238 | * If the Guest page faulted, then the cr2 register will tell us the | ||
204 | * bad virtual address. We have to grab this now, because once we | 239 | * bad virtual address. We have to grab this now, because once we |
205 | * re-enable interrupts an interrupt could fault and thus overwrite | 240 | * re-enable interrupts an interrupt could fault and thus overwrite |
206 | * cr2, or we could even move off to a different CPU. */ | 241 | * cr2, or we could even move off to a different CPU. |
242 | */ | ||
207 | if (cpu->regs->trapnum == 14) | 243 | if (cpu->regs->trapnum == 14) |
208 | cpu->arch.last_pagefault = read_cr2(); | 244 | cpu->arch.last_pagefault = read_cr2(); |
209 | /* Similarly, if we took a trap because the Guest used the FPU, | 245 | /* |
246 | * Similarly, if we took a trap because the Guest used the FPU, | ||
210 | * we have to restore the FPU it expects to see. | 247 | * we have to restore the FPU it expects to see. |
211 | * math_state_restore() may sleep and we may even move off to | 248 | * math_state_restore() may sleep and we may even move off to |
212 | * a different CPU. So all the critical stuff should be done | 249 | * a different CPU. So all the critical stuff should be done |
213 | * before this. */ | 250 | * before this. |
251 | */ | ||
214 | else if (cpu->regs->trapnum == 7) | 252 | else if (cpu->regs->trapnum == 7) |
215 | math_state_restore(); | 253 | math_state_restore(); |
216 | } | 254 | } |
217 | 255 | ||
218 | /*H:130 Now we've examined the hypercall code; our Guest can make requests. | 256 | /*H:130 |
257 | * Now we've examined the hypercall code; our Guest can make requests. | ||
219 | * Our Guest is usually so well behaved; it never tries to do things it isn't | 258 | * Our Guest is usually so well behaved; it never tries to do things it isn't |
220 | * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual | 259 | * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual |
221 | * infrastructure isn't quite complete, because it doesn't contain replacements | 260 | * infrastructure isn't quite complete, because it doesn't contain replacements |
@@ -225,26 +264,33 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) | |||
225 | * | 264 | * |
226 | * When the Guest uses one of these instructions, we get a trap (General | 265 | * When the Guest uses one of these instructions, we get a trap (General |
227 | * Protection Fault) and come here. We see if it's one of those troublesome | 266 | * Protection Fault) and come here. We see if it's one of those troublesome |
228 | * instructions and skip over it. We return true if we did. */ | 267 | * instructions and skip over it. We return true if we did. |
268 | */ | ||
229 | static int emulate_insn(struct lg_cpu *cpu) | 269 | static int emulate_insn(struct lg_cpu *cpu) |
230 | { | 270 | { |
231 | u8 insn; | 271 | u8 insn; |
232 | unsigned int insnlen = 0, in = 0, shift = 0; | 272 | unsigned int insnlen = 0, in = 0, shift = 0; |
233 | /* The eip contains the *virtual* address of the Guest's instruction: | 273 | /* |
234 | * guest_pa just subtracts the Guest's page_offset. */ | 274 | * The eip contains the *virtual* address of the Guest's instruction: |
275 | * guest_pa just subtracts the Guest's page_offset. | ||
276 | */ | ||
235 | unsigned long physaddr = guest_pa(cpu, cpu->regs->eip); | 277 | unsigned long physaddr = guest_pa(cpu, cpu->regs->eip); |
236 | 278 | ||
237 | /* This must be the Guest kernel trying to do something, not userspace! | 279 | /* |
280 | * This must be the Guest kernel trying to do something, not userspace! | ||
238 | * The bottom two bits of the CS segment register are the privilege | 281 | * The bottom two bits of the CS segment register are the privilege |
239 | * level. */ | 282 | * level. |
283 | */ | ||
240 | if ((cpu->regs->cs & 3) != GUEST_PL) | 284 | if ((cpu->regs->cs & 3) != GUEST_PL) |
241 | return 0; | 285 | return 0; |
242 | 286 | ||
243 | /* Decoding x86 instructions is icky. */ | 287 | /* Decoding x86 instructions is icky. */ |
244 | insn = lgread(cpu, physaddr, u8); | 288 | insn = lgread(cpu, physaddr, u8); |
245 | 289 | ||
246 | /* 0x66 is an "operand prefix". It means it's using the upper 16 bits | 290 | /* |
247 | of the eax register. */ | 291 | * 0x66 is an "operand prefix". It means it's using the upper 16 bits |
292 | * of the eax register. | ||
293 | */ | ||
248 | if (insn == 0x66) { | 294 | if (insn == 0x66) { |
249 | shift = 16; | 295 | shift = 16; |
250 | /* The instruction is 1 byte so far, read the next byte. */ | 296 | /* The instruction is 1 byte so far, read the next byte. */ |
@@ -252,8 +298,10 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
252 | insn = lgread(cpu, physaddr + insnlen, u8); | 298 | insn = lgread(cpu, physaddr + insnlen, u8); |
253 | } | 299 | } |
254 | 300 | ||
255 | /* We can ignore the lower bit for the moment and decode the 4 opcodes | 301 | /* |
256 | * we need to emulate. */ | 302 | * We can ignore the lower bit for the moment and decode the 4 opcodes |
303 | * we need to emulate. | ||
304 | */ | ||
257 | switch (insn & 0xFE) { | 305 | switch (insn & 0xFE) { |
258 | case 0xE4: /* in <next byte>,%al */ | 306 | case 0xE4: /* in <next byte>,%al */ |
259 | insnlen += 2; | 307 | insnlen += 2; |
@@ -274,9 +322,11 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
274 | return 0; | 322 | return 0; |
275 | } | 323 | } |
276 | 324 | ||
277 | /* If it was an "IN" instruction, they expect the result to be read | 325 | /* |
326 | * If it was an "IN" instruction, they expect the result to be read | ||
278 | * into %eax, so we change %eax. We always return all-ones, which | 327 | * into %eax, so we change %eax. We always return all-ones, which |
279 | * traditionally means "there's nothing there". */ | 328 | * traditionally means "there's nothing there". |
329 | */ | ||
280 | if (in) { | 330 | if (in) { |
281 | /* Lower bit tells us whether it's a 16 or 32 bit access */ | 331 | /* Lower bit tells us whether it's a 16 or 32 bit access */ |
282 | if (insn & 0x1) | 332 | if (insn & 0x1) |
@@ -290,7 +340,8 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
290 | return 1; | 340 | return 1; |
291 | } | 341 | } |
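(Aside: the opcode folding is easy to check. This throwaway userspace program, mine rather than the patch's, masks all eight port-I/O opcode variants and shows each odd one landing on the even case the switch handles.)

	#include <stdio.h>

	int main(void)
	{
		/* 0xE4/E5: in imm8; 0xE6/E7: out imm8;
		 * 0xEC/ED: in (%dx); 0xEE/EF: out (%dx). */
		unsigned char ops[] = { 0xE4, 0xE5, 0xE6, 0xE7,
					0xEC, 0xED, 0xEE, 0xEF };
		unsigned int i;

		for (i = 0; i < sizeof(ops); i++)
			printf("insn 0x%02X -> case 0x%02X\n",
			       ops[i], ops[i] & 0xFE);
		return 0;
	}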
292 | 342 | ||
293 | /* Our hypercalls mechanism used to be based on direct software interrupts. | 343 | /* |
344 | * Our hypercalls mechanism used to be based on direct software interrupts. | ||
294 | * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to | 345 | * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to |
295 | * change over to using kvm hypercalls. | 346 | * change over to using kvm hypercalls. |
296 | * | 347 | * |
@@ -318,16 +369,20 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
318 | */ | 369 | */ |
319 | static void rewrite_hypercall(struct lg_cpu *cpu) | 370 | static void rewrite_hypercall(struct lg_cpu *cpu) |
320 | { | 371 | { |
321 | /* These are the opcodes we use to patch the Guest. The opcode for "int | 372 | /* |
373 | * These are the opcodes we use to patch the Guest. The opcode for "int | ||
322 | * $0x1f" is "0xcd 0x1f" but the vmcall instruction is 3 bytes long, so we | 374 | * $0x1f" is "0xcd 0x1f" but the vmcall instruction is 3 bytes long, so we |
323 | * complete the sequence with a NOP (0x90). */ | 375 | * complete the sequence with a NOP (0x90). |
376 | */ | ||
324 | u8 insn[3] = {0xcd, 0x1f, 0x90}; | 377 | u8 insn[3] = {0xcd, 0x1f, 0x90}; |
325 | 378 | ||
326 | __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn)); | 379 | __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn)); |
327 | /* The above write might have caused a copy of that page to be made | 380 | /* |
381 | * The above write might have caused a copy of that page to be made | ||
328 | * (if it was read-only). We need to make sure the Guest has | 382 | * (if it was read-only). We need to make sure the Guest has |
329 | * up-to-date pagetables. As this doesn't happen often, we can just | 383 | * up-to-date pagetables. As this doesn't happen often, we can just |
330 | * drop them all. */ | 384 | * drop them all. |
385 | */ | ||
331 | guest_pagetable_clear_all(cpu); | 386 | guest_pagetable_clear_all(cpu); |
332 | } | 387 | } |
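(Aside, with the standard x86 encodings assumed: these are the two 3-byte sequences involved in the rewrite. Both are the same length, so patching never shifts the Guest's code.)

	static const u8 vmcall_insn[3]  = { 0x0f, 0x01, 0xc1 }; /* vmcall */
	static const u8 patched_insn[3] = { 0xcd, 0x1f, 0x90 }; /* int $0x1f; nop */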
333 | 388 | ||
@@ -335,9 +390,11 @@ static bool is_hypercall(struct lg_cpu *cpu) | |||
335 | { | 390 | { |
336 | u8 insn[3]; | 391 | u8 insn[3]; |
337 | 392 | ||
338 | /* This must be the Guest kernel trying to do something. | 393 | /* |
394 | * This must be the Guest kernel trying to do something. | ||
339 | * The bottom two bits of the CS segment register are the privilege | 395 | * The bottom two bits of the CS segment register are the privilege |
340 | * level. */ | 396 | * level. |
397 | */ | ||
341 | if ((cpu->regs->cs & 3) != GUEST_PL) | 398 | if ((cpu->regs->cs & 3) != GUEST_PL) |
342 | return false; | 399 | return false; |
343 | 400 | ||
@@ -351,86 +408,105 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu) | |||
351 | { | 408 | { |
352 | switch (cpu->regs->trapnum) { | 409 | switch (cpu->regs->trapnum) { |
353 | case 13: /* We've intercepted a General Protection Fault. */ | 410 | case 13: /* We've intercepted a General Protection Fault. */ |
354 | /* Check if this was one of those annoying IN or OUT | 411 | /* |
412 | * Check if this was one of those annoying IN or OUT | ||
355 | * instructions which we need to emulate. If so, we just go | 413 | * instructions which we need to emulate. If so, we just go |
356 | * back into the Guest after we've done it. */ | 414 | * back into the Guest after we've done it. |
415 | */ | ||
357 | if (cpu->regs->errcode == 0) { | 416 | if (cpu->regs->errcode == 0) { |
358 | if (emulate_insn(cpu)) | 417 | if (emulate_insn(cpu)) |
359 | return; | 418 | return; |
360 | } | 419 | } |
361 | /* If KVM is active, the vmcall instruction triggers a | 420 | /* |
362 | * General Protection Fault. Normally it triggers an | 421 | * If KVM is active, the vmcall instruction triggers a General |
363 | * invalid opcode fault (6): */ | 422 | * Protection Fault. Normally it triggers an invalid opcode |
423 | * fault (6): | ||
424 | */ | ||
364 | case 6: | 425 | case 6: |
365 | /* We need to check if ring == GUEST_PL and | 426 | /* |
366 | * faulting instruction == vmcall. */ | 427 | * We need to check if ring == GUEST_PL and faulting |
428 | * instruction == vmcall. | ||
429 | */ | ||
367 | if (is_hypercall(cpu)) { | 430 | if (is_hypercall(cpu)) { |
368 | rewrite_hypercall(cpu); | 431 | rewrite_hypercall(cpu); |
369 | return; | 432 | return; |
370 | } | 433 | } |
371 | break; | 434 | break; |
372 | case 14: /* We've intercepted a Page Fault. */ | 435 | case 14: /* We've intercepted a Page Fault. */ |
373 | /* The Guest accessed a virtual address that wasn't mapped. | 436 | /* |
437 | * The Guest accessed a virtual address that wasn't mapped. | ||
374 | * This happens a lot: we don't actually set up most of the page | 438 | * This happens a lot: we don't actually set up most of the page |
375 | * tables for the Guest at all when we start: as it runs it asks | 439 | * tables for the Guest at all when we start: as it runs it asks |
376 | * for more and more, and we set them up as required. In this | 440 | * for more and more, and we set them up as required. In this |
377 | * case, we don't even tell the Guest that the fault happened. | 441 | * case, we don't even tell the Guest that the fault happened. |
378 | * | 442 | * |
379 | * The errcode tells whether this was a read or a write, and | 443 | * The errcode tells whether this was a read or a write, and |
380 | * whether kernel or userspace code. */ | 444 | * whether kernel or userspace code. |
445 | */ | ||
381 | if (demand_page(cpu, cpu->arch.last_pagefault, | 446 | if (demand_page(cpu, cpu->arch.last_pagefault, |
382 | cpu->regs->errcode)) | 447 | cpu->regs->errcode)) |
383 | return; | 448 | return; |
384 | 449 | ||
385 | /* OK, it's really not there (or not OK): the Guest needs to | 450 | /* |
451 | * OK, it's really not there (or not OK): the Guest needs to | ||
386 | * know. We write out the cr2 value so it knows where the | 452 | * know. We write out the cr2 value so it knows where the |
387 | * fault occurred. | 453 | * fault occurred. |
388 | * | 454 | * |
389 | * Note that if the Guest were really messed up, this could | 455 | * Note that if the Guest were really messed up, this could |
390 | * happen before it's done the LHCALL_LGUEST_INIT hypercall, so | 456 | * happen before it's done the LHCALL_LGUEST_INIT hypercall, so |
391 | * lg->lguest_data could be NULL. */ | 457 | * lg->lguest_data could be NULL. |
458 | */ | ||
392 | if (cpu->lg->lguest_data && | 459 | if (cpu->lg->lguest_data && |
393 | put_user(cpu->arch.last_pagefault, | 460 | put_user(cpu->arch.last_pagefault, |
394 | &cpu->lg->lguest_data->cr2)) | 461 | &cpu->lg->lguest_data->cr2)) |
395 | kill_guest(cpu, "Writing cr2"); | 462 | kill_guest(cpu, "Writing cr2"); |
396 | break; | 463 | break; |
397 | case 7: /* We've intercepted a Device Not Available fault. */ | 464 | case 7: /* We've intercepted a Device Not Available fault. */ |
398 | /* If the Guest doesn't want to know, we already restored the | 465 | /* |
399 | * Floating Point Unit, so we just continue without telling | 466 | * If the Guest doesn't want to know, we already restored the |
400 | * it. */ | 467 | * Floating Point Unit, so we just continue without telling it. |
468 | */ | ||
401 | if (!cpu->ts) | 469 | if (!cpu->ts) |
402 | return; | 470 | return; |
403 | break; | 471 | break; |
404 | case 32 ... 255: | 472 | case 32 ... 255: |
405 | /* These values mean a real interrupt occurred, in which case | 473 | /* |
474 | * These values mean a real interrupt occurred, in which case | ||
406 | * the Host handler has already been run. We just do a | 475 | * the Host handler has already been run. We just do a |
407 | * friendly check if another process should now be run, then | 476 | * friendly check if another process should now be run, then |
408 | * return to run the Guest again. */ | 477 | * return to run the Guest again. |
478 | */ | ||
409 | cond_resched(); | 479 | cond_resched(); |
410 | return; | 480 | return; |
411 | case LGUEST_TRAP_ENTRY: | 481 | case LGUEST_TRAP_ENTRY: |
412 | /* Our 'struct hcall_args' maps directly over our regs: we set | 482 | /* |
413 | * up the pointer now to indicate a hypercall is pending. */ | 483 | * Our 'struct hcall_args' maps directly over our regs: we set |
484 | * up the pointer now to indicate a hypercall is pending. | ||
485 | */ | ||
414 | cpu->hcall = (struct hcall_args *)cpu->regs; | 486 | cpu->hcall = (struct hcall_args *)cpu->regs; |
415 | return; | 487 | return; |
416 | } | 488 | } |
417 | 489 | ||
418 | /* We didn't handle the trap, so it needs to go to the Guest. */ | 490 | /* We didn't handle the trap, so it needs to go to the Guest. */ |
419 | if (!deliver_trap(cpu, cpu->regs->trapnum)) | 491 | if (!deliver_trap(cpu, cpu->regs->trapnum)) |
420 | /* If the Guest doesn't have a handler (either it hasn't | 492 | /* |
493 | * If the Guest doesn't have a handler (either it hasn't | ||
421 | * registered any yet, or it's one of the faults we don't let | 494 | * registered any yet, or it's one of the faults we don't let |
422 | * it handle), it dies with this cryptic error message. */ | 495 | * it handle), it dies with this cryptic error message. |
496 | */ | ||
423 | kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)", | 497 | kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)", |
424 | cpu->regs->trapnum, cpu->regs->eip, | 498 | cpu->regs->trapnum, cpu->regs->eip, |
425 | cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault | 499 | cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault |
426 | : cpu->regs->errcode); | 500 | : cpu->regs->errcode); |
427 | } | 501 | } |
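(Aside: the cast in the LGUEST_TRAP_ENTRY case works because "struct hcall_args" is laid out to alias the first saved registers. This is a sketch of the idea, not the real definition, which lives in asm/lguest_hcall.h:)

	struct hcall_args_sketch {
		/* aliases the first fields of struct lguest_regs */
		unsigned long arg0, arg1, arg2, arg3;
	};

No copying is needed: the hypercall arguments are simply read straight out of the register save area.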
428 | 502 | ||
429 | /* Now we can look at each of the routines this calls, in increasing order of | 503 | /* |
504 | * Now we can look at each of the routines this calls, in increasing order of | ||
430 | * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), | 505 | * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), |
431 | * deliver_trap() and demand_page(). After all those, we'll be ready to | 506 | * deliver_trap() and demand_page(). After all those, we'll be ready to |
432 | * examine the Switcher, and our philosophical understanding of the Host/Guest | 507 | * examine the Switcher, and our philosophical understanding of the Host/Guest |
433 | * duality will be complete. :*/ | 508 | * duality will be complete. |
509 | :*/ | ||
434 | static void adjust_pge(void *on) | 510 | static void adjust_pge(void *on) |
435 | { | 511 | { |
436 | if (on) | 512 | if (on) |
@@ -439,13 +515,16 @@ static void adjust_pge(void *on) | |||
439 | write_cr4(read_cr4() & ~X86_CR4_PGE); | 515 | write_cr4(read_cr4() & ~X86_CR4_PGE); |
440 | } | 516 | } |
441 | 517 | ||
442 | /*H:020 Now the Switcher is mapped and everything else is ready, we need to do | 518 | /*H:020 |
443 | * some more i386-specific initialization. | 519 | * Now the Switcher is mapped and everything else is ready, we need to do |
520 | * some more i386-specific initialization. | ||
521 | */ | ||
444 | void __init lguest_arch_host_init(void) | 522 | void __init lguest_arch_host_init(void) |
445 | { | 523 | { |
446 | int i; | 524 | int i; |
447 | 525 | ||
448 | /* Most of the i386/switcher.S doesn't care that it's been moved; on | 526 | /* |
527 | * Most of the i386/switcher.S doesn't care that it's been moved; on | ||
449 | * Intel, jumps are relative, and it doesn't access any references to | 528 | * Intel, jumps are relative, and it doesn't access any references to |
450 | * external code or data. | 529 | * external code or data. |
451 | * | 530 | * |
@@ -453,7 +532,8 @@ void __init lguest_arch_host_init(void) | |||
453 | * addresses are placed in a table (default_idt_entries), so we need to | 532 | * addresses are placed in a table (default_idt_entries), so we need to |
454 | * update the table with the new addresses. switcher_offset() is a | 533 | * update the table with the new addresses. switcher_offset() is a |
455 | * convenience function which returns the distance between the | 534 | * convenience function which returns the distance between the |
456 | * compiled-in switcher code and the high-mapped copy we just made. */ | 535 | * compiled-in switcher code and the high-mapped copy we just made. |
536 | */ | ||
457 | for (i = 0; i < IDT_ENTRIES; i++) | 537 | for (i = 0; i < IDT_ENTRIES; i++) |
458 | default_idt_entries[i] += switcher_offset(); | 538 | default_idt_entries[i] += switcher_offset(); |
459 | 539 | ||
@@ -468,63 +548,81 @@ void __init lguest_arch_host_init(void) | |||
468 | for_each_possible_cpu(i) { | 548 | for_each_possible_cpu(i) { |
469 | /* lguest_pages() returns this CPU's two pages. */ | 549 | /* lguest_pages() returns this CPU's two pages. */ |
470 | struct lguest_pages *pages = lguest_pages(i); | 550 | struct lguest_pages *pages = lguest_pages(i); |
471 | /* This is a convenience pointer to make the code fit one | 551 | /* This is a convenience pointer to make the code neater. */ |
472 | * statement to a line. */ | ||
473 | struct lguest_ro_state *state = &pages->state; | 552 | struct lguest_ro_state *state = &pages->state; |
474 | 553 | ||
475 | /* The Global Descriptor Table: the Host has a different one | 554 | /* |
555 | * The Global Descriptor Table: the Host has a different one | ||
476 | * for each CPU. We keep a descriptor for the GDT which says | 556 | * for each CPU. We keep a descriptor for the GDT which says |
477 | * where it is and how big it is (the size is actually the last | 557 | * where it is and how big it is (the size is actually the last |
478 | * byte, not the size, hence the "-1"). */ | 558 | * byte, not the size, hence the "-1"). |
559 | */ | ||
479 | state->host_gdt_desc.size = GDT_SIZE-1; | 560 | state->host_gdt_desc.size = GDT_SIZE-1; |
480 | state->host_gdt_desc.address = (long)get_cpu_gdt_table(i); | 561 | state->host_gdt_desc.address = (long)get_cpu_gdt_table(i); |
481 | 562 | ||
482 | /* All CPUs on the Host use the same Interrupt Descriptor | 563 | /* |
564 | * All CPUs on the Host use the same Interrupt Descriptor | ||
483 | * Table, so we just use store_idt(), which gets this CPU's IDT | 565 | * Table, so we just use store_idt(), which gets this CPU's IDT |
484 | * descriptor. */ | 566 | * descriptor. |
567 | */ | ||
485 | store_idt(&state->host_idt_desc); | 568 | store_idt(&state->host_idt_desc); |
486 | 569 | ||
487 | /* The descriptors for the Guest's GDT and IDT can be filled | 570 | /* |
571 | * The descriptors for the Guest's GDT and IDT can be filled | ||
488 | * out now, too. We copy the GDT & IDT into ->guest_gdt and | 572 | * out now, too. We copy the GDT & IDT into ->guest_gdt and |
489 | * ->guest_idt before actually running the Guest. */ | 573 | * ->guest_idt before actually running the Guest. |
574 | */ | ||
490 | state->guest_idt_desc.size = sizeof(state->guest_idt)-1; | 575 | state->guest_idt_desc.size = sizeof(state->guest_idt)-1; |
491 | state->guest_idt_desc.address = (long)&state->guest_idt; | 576 | state->guest_idt_desc.address = (long)&state->guest_idt; |
492 | state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1; | 577 | state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1; |
493 | state->guest_gdt_desc.address = (long)&state->guest_gdt; | 578 | state->guest_gdt_desc.address = (long)&state->guest_gdt; |
494 | 579 | ||
495 | /* We know where we want the stack to be when the Guest enters | 580 | /* |
581 | * We know where we want the stack to be when the Guest enters | ||
496 | * the Switcher: in pages->regs. The stack grows upwards, so | 582 | * the Switcher: in pages->regs. The stack grows upwards, so |
497 | * we start it at the end of that structure. */ | 583 | * we start it at the end of that structure. |
584 | */ | ||
498 | state->guest_tss.sp0 = (long)(&pages->regs + 1); | 585 | state->guest_tss.sp0 = (long)(&pages->regs + 1); |
499 | /* And this is the GDT entry to use for the stack: we keep a | 586 | /* |
500 | * couple of special LGUEST entries. */ | 587 | * And this is the GDT entry to use for the stack: we keep a |
588 | * couple of special LGUEST entries. | ||
589 | */ | ||
501 | state->guest_tss.ss0 = LGUEST_DS; | 590 | state->guest_tss.ss0 = LGUEST_DS; |
502 | 591 | ||
503 | /* x86 can have a fine-grained bitmap which indicates what I/O | 592 | /* |
593 | * x86 can have a fine-grained bitmap which indicates what I/O | ||
504 | * ports the process can use. We set it to the end of our | 594 | * ports the process can use. We set it to the end of our |
505 | * structure, meaning "none". */ | 595 | * structure, meaning "none". |
596 | */ | ||
506 | state->guest_tss.io_bitmap_base = sizeof(state->guest_tss); | 597 | state->guest_tss.io_bitmap_base = sizeof(state->guest_tss); |
507 | 598 | ||
508 | /* Some GDT entries are the same across all Guests, so we can | 599 | /* |
509 | * set them up now. */ | 600 | * Some GDT entries are the same across all Guests, so we can |
601 | * set them up now. | ||
602 | */ | ||
510 | setup_default_gdt_entries(state); | 603 | setup_default_gdt_entries(state); |
511 | /* Most IDT entries are the same for all Guests, too. */ | 604 | /* Most IDT entries are the same for all Guests, too. */ |
512 | setup_default_idt_entries(state, default_idt_entries); | 605 | setup_default_idt_entries(state, default_idt_entries); |
513 | 606 | ||
514 | /* The Host needs to be able to use the LGUEST segments on this | 607 | /* |
515 | * CPU, too, so put them in the Host GDT. */ | 608 | * The Host needs to be able to use the LGUEST segments on this |
609 | * CPU, too, so put them in the Host GDT. | ||
610 | */ | ||
516 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; | 611 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; |
517 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; | 612 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; |
518 | } | 613 | } |
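(Aside: the "-1" in the descriptor sizes above is because x86 descriptor-table limits are inclusive, i.e. the offset of the last valid byte. A sketch of the conventional pointer shape, mine rather than the patch's:)

	struct desc_table_ptr_sketch {
		unsigned short size;	/* last byte's offset: bytes - 1 */
		unsigned long address;	/* linear address of the table */
	} __attribute__((packed));

	/* e.g. 32 eight-byte GDT entries => size = 32*8 - 1 = 255 */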
519 | 614 | ||
520 | /* In the Switcher, we want the %cs segment register to use the | 615 | /* |
616 | * In the Switcher, we want the %cs segment register to use the | ||
521 | * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so | 617 | * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so |
522 | * it will be undisturbed when we switch. To change %cs and jump we | 618 | * it will be undisturbed when we switch. To change %cs and jump we |
523 | * need this structure to feed to Intel's "lcall" instruction. */ | 619 | * need this structure to feed to Intel's "lcall" instruction. |
620 | */ | ||
524 | lguest_entry.offset = (long)switch_to_guest + switcher_offset(); | 621 | lguest_entry.offset = (long)switch_to_guest + switcher_offset(); |
525 | lguest_entry.segment = LGUEST_CS; | 622 | lguest_entry.segment = LGUEST_CS; |
526 | 623 | ||
527 | /* Finally, we need to turn off "Page Global Enable". PGE is an | 624 | /* |
625 | * Finally, we need to turn off "Page Global Enable". PGE is an | ||
528 | * optimization where page table entries are specially marked to show | 626 | * optimization where page table entries are specially marked to show |
529 | * they never change. The Host kernel marks all the kernel pages this | 627 | * they never change. The Host kernel marks all the kernel pages this |
530 | * way because it's always present, even when userspace is running. | 628 | * way because it's always present, even when userspace is running. |
@@ -534,16 +632,21 @@ void __init lguest_arch_host_init(void) | |||
534 | * you'll get really weird bugs that you'll chase for two days. | 632 | * you'll get really weird bugs that you'll chase for two days. |
535 | * | 633 | * |
536 | * I used to turn PGE off every time we switched to the Guest and back | 634 | * I used to turn PGE off every time we switched to the Guest and back |
537 | * on when we return, but that slowed the Switcher down noticeably. */ | 635 | * on when we return, but that slowed the Switcher down noticeably. |
636 | */ | ||
538 | 637 | ||
539 | /* We don't need the complexity of CPUs coming and going while we're | 638 | /* |
540 | * doing this. */ | 639 | * We don't need the complexity of CPUs coming and going while we're |
640 | * doing this. | ||
641 | */ | ||
541 | get_online_cpus(); | 642 | get_online_cpus(); |
542 | if (cpu_has_pge) { /* We have a broader idea of "global". */ | 643 | if (cpu_has_pge) { /* We have a broader idea of "global". */ |
543 | /* Remember that this was originally set (for cleanup). */ | 644 | /* Remember that this was originally set (for cleanup). */ |
544 | cpu_had_pge = 1; | 645 | cpu_had_pge = 1; |
545 | /* adjust_pge is a helper function which sets or unsets the PGE | 646 | /* |
546 | * bit on its CPU, depending on the argument (0 == unset). */ | 647 | * adjust_pge is a helper function which sets or unsets the PGE |
648 | * bit on its CPU, depending on the argument (0 == unset). | ||
649 | */ | ||
547 | on_each_cpu(adjust_pge, (void *)0, 1); | 650 | on_each_cpu(adjust_pge, (void *)0, 1); |
548 | /* Turn off the feature in the global feature set. */ | 651 | /* Turn off the feature in the global feature set. */ |
549 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); | 652 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); |
@@ -590,26 +693,32 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu) | |||
590 | { | 693 | { |
591 | u32 tsc_speed; | 694 | u32 tsc_speed; |
592 | 695 | ||
593 | /* The pointer to the Guest's "struct lguest_data" is the only argument. | 696 | /* |
594 | * We check that address now. */ | 697 | * The pointer to the Guest's "struct lguest_data" is the only argument. |
698 | * We check that address now. | ||
699 | */ | ||
595 | if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1, | 700 | if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1, |
596 | sizeof(*cpu->lg->lguest_data))) | 701 | sizeof(*cpu->lg->lguest_data))) |
597 | return -EFAULT; | 702 | return -EFAULT; |
598 | 703 | ||
599 | /* Having checked it, we simply set lg->lguest_data to point straight | 704 | /* |
705 | * Having checked it, we simply set lg->lguest_data to point straight | ||
600 | * into the Launcher's memory at the right place and then use | 706 | * into the Launcher's memory at the right place and then use |
601 | * copy_to_user/from_user from now on, instead of lgread/write. I put | 707 | * copy_to_user/from_user from now on, instead of lgread/write. I put |
602 | * this in to show that I'm not immune to writing stupid | 708 | * this in to show that I'm not immune to writing stupid |
603 | * optimizations. */ | 709 | * optimizations. |
710 | */ | ||
604 | cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1; | 711 | cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1; |
605 | 712 | ||
606 | /* We insist that the Time Stamp Counter exists and doesn't change with | 713 | /* |
714 | * We insist that the Time Stamp Counter exists and doesn't change with | ||
607 | * cpu frequency. Some devious chip manufacturers decided that TSC | 715 | * cpu frequency. Some devious chip manufacturers decided that TSC |
608 | * changes could be handled in software. I decided that time going | 716 | * changes could be handled in software. I decided that time going |
609 | * backwards might be good for benchmarks, but it's bad for users. | 717 | * backwards might be good for benchmarks, but it's bad for users. |
610 | * | 718 | * |
611 | * We also insist that the TSC be stable: the kernel detects unreliable | 719 | * We also insist that the TSC be stable: the kernel detects unreliable |
612 | * TSCs for its own purposes, and we use that here. */ | 720 | * TSCs for its own purposes, and we use that here. |
721 | */ | ||
613 | if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable()) | 722 | if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable()) |
614 | tsc_speed = tsc_khz; | 723 | tsc_speed = tsc_khz; |
615 | else | 724 | else |
@@ -625,38 +734,47 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu) | |||
625 | } | 734 | } |
626 | /*:*/ | 735 | /*:*/ |
627 | 736 | ||
628 | /*L:030 lguest_arch_setup_regs() | 737 | /*L:030 |
738 | * lguest_arch_setup_regs() | ||
629 | * | 739 | * |
630 | * Most of the Guest's registers are left alone: we used get_zeroed_page() to | 740 | * Most of the Guest's registers are left alone: we used get_zeroed_page() to |
631 | * allocate the structure, so they will be 0. */ | 741 | * allocate the structure, so they will be 0. |
742 | */ | ||
632 | void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start) | 743 | void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start) |
633 | { | 744 | { |
634 | struct lguest_regs *regs = cpu->regs; | 745 | struct lguest_regs *regs = cpu->regs; |
635 | 746 | ||
636 | /* There are four "segment" registers which the Guest needs to boot: | 747 | /* |
748 | * There are four "segment" registers which the Guest needs to boot: | ||
637 | * The "code segment" register (cs) refers to the kernel code segment | 749 | * The "code segment" register (cs) refers to the kernel code segment |
638 | * __KERNEL_CS, and the "data", "extra" and "stack" segment registers | 750 | * __KERNEL_CS, and the "data", "extra" and "stack" segment registers |
639 | * refer to the kernel data segment __KERNEL_DS. | 751 | * refer to the kernel data segment __KERNEL_DS. |
640 | * | 752 | * |
641 | * The privilege level is packed into the lower bits. The Guest runs | 753 | * The privilege level is packed into the lower bits. The Guest runs |
642 | * at privilege level 1 (GUEST_PL).*/ | 754 | * at privilege level 1 (GUEST_PL). |
755 | */ | ||
643 | regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; | 756 | regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; |
644 | regs->cs = __KERNEL_CS|GUEST_PL; | 757 | regs->cs = __KERNEL_CS|GUEST_PL; |
645 | 758 | ||
646 | /* The "eflags" register contains miscellaneous flags. Bit 1 (0x002) | 759 | /* |
760 | * The "eflags" register contains miscellaneous flags. Bit 1 (0x002) | ||
647 | * is supposed to always be "1". Bit 9 (0x200) controls whether | 761 | * is supposed to always be "1". Bit 9 (0x200) controls whether |
648 | * interrupts are enabled. We always leave interrupts enabled while | 762 | * interrupts are enabled. We always leave interrupts enabled while |
649 | * running the Guest. */ | 763 | * running the Guest. |
764 | */ | ||
650 | regs->eflags = X86_EFLAGS_IF | 0x2; | 765 | regs->eflags = X86_EFLAGS_IF | 0x2; |
651 | 766 | ||
652 | /* The "Extended Instruction Pointer" register says where the Guest is | 767 | /* |
653 | * running. */ | 768 | * The "Extended Instruction Pointer" register says where the Guest is |
769 | * running. | ||
770 | */ | ||
654 | regs->eip = start; | 771 | regs->eip = start; |
655 | 772 | ||
656 | /* %esi points to our boot information, at physical address 0, so don't | 773 | /* |
657 | * touch it. */ | 774 | * %esi points to our boot information, at physical address 0, so don't |
775 | * touch it. | ||
776 | */ | ||
658 | 777 | ||
659 | /* There are a couple of GDT entries the Guest expects when first | 778 | /* There are a couple of GDT entries the Guest expects at boot. */ |
660 | * booting. */ | ||
661 | setup_guest_gdt(cpu); | 779 | setup_guest_gdt(cpu); |
662 | } | 780 | } |
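(Aside: the initial eflags constant is small enough to check by hand; this throwaway program, not part of the patch, prints it.)

	#include <stdio.h>

	#define X86_EFLAGS_IF 0x200	/* bit 9: interrupts enabled */

	int main(void)
	{
		/* bit 1 (0x2) is the architecturally always-one flag */
		printf("initial Guest eflags = %#x\n", X86_EFLAGS_IF | 0x2);
		return 0;	/* prints 0x202 */
	}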
diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S index 3fc15318a80f..40634b0db9f7 100644 --- a/drivers/lguest/x86/switcher_32.S +++ b/drivers/lguest/x86/switcher_32.S | |||
@@ -1,12 +1,15 @@ | |||
1 | /*P:900 This is the Switcher: code which sits at 0xFFC00000 astride both the | 1 | /*P:900 |
2 | * Host and Guest to do the low-level Guest<->Host switch. It is as simple as | 2 | * This is the Switcher: code which sits at 0xFFC00000 (or 0xFFE00000) astride |
3 | * it can be made, but it's naturally very specific to x86. | 3 | * both the Host and Guest to do the low-level Guest<->Host switch. It is as |
4 | * simple as it can be made, but it's naturally very specific to x86. | ||
4 | * | 5 | * |
5 | * You have now completed Preparation. If this has whet your appetite; if you | 6 | * You have now completed Preparation. If this has whet your appetite; if you |
6 | * are feeling invigorated and refreshed then the next, more challenging stage | 7 | * are feeling invigorated and refreshed then the next, more challenging stage |
7 | * can be found in "make Guest". :*/ | 8 | * can be found in "make Guest". |
9 | :*/ | ||
8 | 10 | ||
9 | /*M:012 Lguest is meant to be simple: my rule of thumb is that 1% more LOC must | 11 | /*M:012 |
12 | * Lguest is meant to be simple: my rule of thumb is that 1% more LOC must | ||
10 | * gain at least 1% more performance. Since neither LOC nor performance can be | 13 | * gain at least 1% more performance. Since neither LOC nor performance can be |
11 | * measured beforehand, it generally means implementing a feature then deciding | 14 | * measured beforehand, it generally means implementing a feature then deciding |
12 | * if it's worth it. And once it's implemented, who can say no? | 15 | * if it's worth it. And once it's implemented, who can say no? |
@@ -31,11 +34,14 @@ | |||
31 | * Host (which is actually really easy). | 34 | * Host (which is actually really easy). |
32 | * | 35 | * |
33 | * Two questions remain. Would the performance gain outweigh the complexity? | 36 | * Two questions remain. Would the performance gain outweigh the complexity? |
34 | * And who would write the verse documenting it? :*/ | 37 | * And who would write the verse documenting it? |
38 | :*/ | ||
35 | 39 | ||
36 | /*M:011 Lguest64 handles NMI. This gave me NMI envy (until I looked at their | 40 | /*M:011 |
41 | * Lguest64 handles NMI. This gave me NMI envy (until I looked at their | ||
37 | * code). It's worth doing though, since it would let us use oprofile in the | 42 | * code). It's worth doing though, since it would let us use oprofile in the |
38 | * Host when a Guest is running. :*/ | 43 | * Host when a Guest is running. |
44 | :*/ | ||
39 | 45 | ||
40 | /*S:100 | 46 | /*S:100 |
41 | * Welcome to the Switcher itself! | 47 | * Welcome to the Switcher itself! |
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index bcec78ffc765..248e00ec4dc1 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
@@ -52,8 +52,10 @@ struct virtio_pci_device | |||
52 | char (*msix_names)[256]; | 52 | char (*msix_names)[256]; |
53 | /* Number of available vectors */ | 53 | /* Number of available vectors */ |
54 | unsigned msix_vectors; | 54 | unsigned msix_vectors; |
55 | /* Vectors allocated */ | 55 | /* Vectors allocated, excluding per-vq vectors if any */ |
56 | unsigned msix_used_vectors; | 56 | unsigned msix_used_vectors; |
57 | /* Whether we have a vector per vq */ | ||
58 | bool per_vq_vectors; | ||
57 | }; | 59 | }; |
58 | 60 | ||
59 | /* Constants for MSI-X */ | 61 | /* Constants for MSI-X */ |
@@ -258,7 +260,6 @@ static void vp_free_vectors(struct virtio_device *vdev) | |||
258 | 260 | ||
259 | for (i = 0; i < vp_dev->msix_used_vectors; ++i) | 261 | for (i = 0; i < vp_dev->msix_used_vectors; ++i) |
260 | free_irq(vp_dev->msix_entries[i].vector, vp_dev); | 262 | free_irq(vp_dev->msix_entries[i].vector, vp_dev); |
261 | vp_dev->msix_used_vectors = 0; | ||
262 | 263 | ||
263 | if (vp_dev->msix_enabled) { | 264 | if (vp_dev->msix_enabled) { |
264 | /* Disable the vector used for configuration */ | 265 | /* Disable the vector used for configuration */ |
@@ -267,80 +268,77 @@ static void vp_free_vectors(struct virtio_device *vdev) | |||
267 | /* Flush the write out to device */ | 268 | /* Flush the write out to device */ |
268 | ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | 269 | ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
269 | 270 | ||
270 | vp_dev->msix_enabled = 0; | ||
271 | pci_disable_msix(vp_dev->pci_dev); | 271 | pci_disable_msix(vp_dev->pci_dev); |
272 | vp_dev->msix_enabled = 0; | ||
273 | vp_dev->msix_vectors = 0; | ||
272 | } | 274 | } |
273 | } | ||
274 | 275 | ||
275 | static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries, | 276 | vp_dev->msix_used_vectors = 0; |
276 | int *options, int noptions) | 277 | kfree(vp_dev->msix_names); |
277 | { | 278 | vp_dev->msix_names = NULL; |
278 | int i; | 279 | kfree(vp_dev->msix_entries); |
279 | for (i = 0; i < noptions; ++i) | 280 | vp_dev->msix_entries = NULL; |
280 | if (!pci_enable_msix(dev, entries, options[i])) | ||
281 | return options[i]; | ||
282 | return -EBUSY; | ||
283 | } | 281 | } |
284 | 282 | ||
285 | static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs) | 283 | static int vp_request_vectors(struct virtio_device *vdev, int nvectors, |
284 | bool per_vq_vectors) | ||
286 | { | 285 | { |
287 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 286 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
288 | const char *name = dev_name(&vp_dev->vdev.dev); | 287 | const char *name = dev_name(&vp_dev->vdev.dev); |
289 | unsigned i, v; | 288 | unsigned i, v; |
290 | int err = -ENOMEM; | 289 | int err = -ENOMEM; |
291 | /* We want at most one vector per queue and one for config changes. | 290 | |
292 | * Fallback to separate vectors for config and a shared for queues. | 291 | if (!nvectors) { |
293 | * Finally fall back to regular interrupts. */ | 292 | /* Can't allocate MSI-X vectors, use regular interrupt */ |
294 | int options[] = { max_vqs + 1, 2 }; | 293 | vp_dev->msix_vectors = 0; |
295 | int nvectors = max(options[0], options[1]); | 294 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, |
295 | IRQF_SHARED, name, vp_dev); | ||
296 | if (err) | ||
297 | return err; | ||
298 | vp_dev->intx_enabled = 1; | ||
299 | return 0; | ||
300 | } | ||
296 | 301 | ||
297 | vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, | 302 | vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, |
298 | GFP_KERNEL); | 303 | GFP_KERNEL); |
299 | if (!vp_dev->msix_entries) | 304 | if (!vp_dev->msix_entries) |
300 | goto error_entries; | 305 | goto error; |
301 | vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, | 306 | vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, |
302 | GFP_KERNEL); | 307 | GFP_KERNEL); |
303 | if (!vp_dev->msix_names) | 308 | if (!vp_dev->msix_names) |
304 | goto error_names; | 309 | goto error; |
305 | 310 | ||
306 | for (i = 0; i < nvectors; ++i) | 311 | for (i = 0; i < nvectors; ++i) |
307 | vp_dev->msix_entries[i].entry = i; | 312 | vp_dev->msix_entries[i].entry = i; |
308 | 313 | ||
309 | err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, | 314 | err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); |
310 | options, ARRAY_SIZE(options)); | 315 | if (err > 0) |
311 | if (err < 0) { | 316 | err = -ENOSPC; |
312 | /* Can't allocate enough MSI-X vectors, use regular interrupt */ | 317 | if (err) |
313 | vp_dev->msix_vectors = 0; | 318 | goto error; |
314 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, | 319 | vp_dev->msix_vectors = nvectors; |
315 | IRQF_SHARED, name, vp_dev); | 320 | vp_dev->msix_enabled = 1; |
316 | if (err) | 321 | |
317 | goto error_irq; | 322 | /* Set the vector used for configuration */ |
318 | vp_dev->intx_enabled = 1; | 323 | v = vp_dev->msix_used_vectors; |
319 | } else { | 324 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, |
320 | vp_dev->msix_vectors = err; | 325 | "%s-config", name); |
321 | vp_dev->msix_enabled = 1; | 326 | err = request_irq(vp_dev->msix_entries[v].vector, |
322 | 327 | vp_config_changed, 0, vp_dev->msix_names[v], | |
323 | /* Set the vector used for configuration */ | 328 | vp_dev); |
324 | v = vp_dev->msix_used_vectors; | 329 | if (err) |
325 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, | 330 | goto error; |
326 | "%s-config", name); | 331 | ++vp_dev->msix_used_vectors; |
327 | err = request_irq(vp_dev->msix_entries[v].vector, | 332 | |
328 | vp_config_changed, 0, vp_dev->msix_names[v], | 333 | iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
329 | vp_dev); | 334 | /* Verify we had enough resources to assign the vector */ |
330 | if (err) | 335 | v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
331 | goto error_irq; | 336 | if (v == VIRTIO_MSI_NO_VECTOR) { |
332 | ++vp_dev->msix_used_vectors; | 337 | err = -EBUSY; |
333 | 338 | goto error; | |
334 | iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
335 | /* Verify we had enough resources to assign the vector */ | ||
336 | v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
337 | if (v == VIRTIO_MSI_NO_VECTOR) { | ||
338 | err = -EBUSY; | ||
339 | goto error_irq; | ||
340 | } | ||
341 | } | 339 | } |
342 | 340 | ||
343 | if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) { | 341 | if (!per_vq_vectors) { |
344 | /* Shared vector for all VQs */ | 342 | /* Shared vector for all VQs */ |
345 | v = vp_dev->msix_used_vectors; | 343 | v = vp_dev->msix_used_vectors; |
346 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, | 344 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, |
@@ -349,28 +347,25 @@ static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs) | |||
349 | vp_vring_interrupt, 0, vp_dev->msix_names[v], | 347 | vp_vring_interrupt, 0, vp_dev->msix_names[v], |
350 | vp_dev); | 348 | vp_dev); |
351 | if (err) | 349 | if (err) |
352 | goto error_irq; | 350 | goto error; |
353 | ++vp_dev->msix_used_vectors; | 351 | ++vp_dev->msix_used_vectors; |
354 | } | 352 | } |
355 | return 0; | 353 | return 0; |
356 | error_irq: | 354 | error: |
357 | vp_free_vectors(vdev); | 355 | vp_free_vectors(vdev); |
358 | kfree(vp_dev->msix_names); | ||
359 | error_names: | ||
360 | kfree(vp_dev->msix_entries); | ||
361 | error_entries: | ||
362 | return err; | 356 | return err; |
363 | } | 357 | } |
364 | 358 | ||
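(Aside: note the write-then-verify idiom for MSI-X vectors; the device may silently refuse an assignment, so the only way to know is to read the register back. Stripped to its core, as my paraphrase rather than a new function in the patch:)

	iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	if (ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR)
	    == VIRTIO_MSI_NO_VECTOR)
		return -EBUSY;	/* the device couldn't back this vector */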
365 | static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | 359 | static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, |
366 | void (*callback)(struct virtqueue *vq), | 360 | void (*callback)(struct virtqueue *vq), |
367 | const char *name) | 361 | const char *name, |
362 | u16 vector) | ||
368 | { | 363 | { |
369 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 364 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
370 | struct virtio_pci_vq_info *info; | 365 | struct virtio_pci_vq_info *info; |
371 | struct virtqueue *vq; | 366 | struct virtqueue *vq; |
372 | unsigned long flags, size; | 367 | unsigned long flags, size; |
373 | u16 num, vector; | 368 | u16 num; |
374 | int err; | 369 | int err; |
375 | 370 | ||
376 | /* Select the queue we're interested in */ | 371 | /* Select the queue we're interested in */ |
@@ -389,7 +384,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
389 | 384 | ||
390 | info->queue_index = index; | 385 | info->queue_index = index; |
391 | info->num = num; | 386 | info->num = num; |
392 | info->vector = VIRTIO_MSI_NO_VECTOR; | 387 | info->vector = vector; |
393 | 388 | ||
394 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); | 389 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); |
395 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); | 390 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); |
@@ -413,22 +408,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
413 | vq->priv = info; | 408 | vq->priv = info; |
414 | info->vq = vq; | 409 | info->vq = vq; |
415 | 410 | ||
416 | /* allocate per-vq vector if available and necessary */ | 411 | if (vector != VIRTIO_MSI_NO_VECTOR) { |
417 | if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) { | ||
418 | vector = vp_dev->msix_used_vectors; | ||
419 | snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names, | ||
420 | "%s-%s", dev_name(&vp_dev->vdev.dev), name); | ||
421 | err = request_irq(vp_dev->msix_entries[vector].vector, | ||
422 | vring_interrupt, 0, | ||
423 | vp_dev->msix_names[vector], vq); | ||
424 | if (err) | ||
425 | goto out_request_irq; | ||
426 | info->vector = vector; | ||
427 | ++vp_dev->msix_used_vectors; | ||
428 | } else | ||
429 | vector = VP_MSIX_VQ_VECTOR; | ||
430 | |||
431 | if (callback && vp_dev->msix_enabled) { | ||
432 | iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | 412 | iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); |
433 | vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | 413 | vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); |
434 | if (vector == VIRTIO_MSI_NO_VECTOR) { | 414 | if (vector == VIRTIO_MSI_NO_VECTOR) { |
@@ -444,11 +424,6 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
444 | return vq; | 424 | return vq; |
445 | 425 | ||
446 | out_assign: | 426 | out_assign: |
447 | if (info->vector != VIRTIO_MSI_NO_VECTOR) { | ||
448 | free_irq(vp_dev->msix_entries[info->vector].vector, vq); | ||
449 | --vp_dev->msix_used_vectors; | ||
450 | } | ||
451 | out_request_irq: | ||
452 | vring_del_virtqueue(vq); | 427 | vring_del_virtqueue(vq); |
453 | out_activate_queue: | 428 | out_activate_queue: |
454 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | 429 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); |
@@ -462,12 +437,13 @@ static void vp_del_vq(struct virtqueue *vq) | |||
462 | { | 437 | { |
463 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); | 438 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); |
464 | struct virtio_pci_vq_info *info = vq->priv; | 439 | struct virtio_pci_vq_info *info = vq->priv; |
465 | unsigned long size; | 440 | unsigned long flags, size; |
466 | 441 | ||
467 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); | 442 | spin_lock_irqsave(&vp_dev->lock, flags); |
443 | list_del(&info->node); | ||
444 | spin_unlock_irqrestore(&vp_dev->lock, flags); | ||
468 | 445 | ||
469 | if (info->vector != VIRTIO_MSI_NO_VECTOR) | 446 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); |
470 | free_irq(vp_dev->msix_entries[info->vector].vector, vq); | ||
471 | 447 | ||
472 | if (vp_dev->msix_enabled) { | 448 | if (vp_dev->msix_enabled) { |
473 | iowrite16(VIRTIO_MSI_NO_VECTOR, | 449 | iowrite16(VIRTIO_MSI_NO_VECTOR, |
@@ -489,36 +465,62 @@ static void vp_del_vq(struct virtqueue *vq) | |||
489 | /* the config->del_vqs() implementation */ | 465 | /* the config->del_vqs() implementation */ |
490 | static void vp_del_vqs(struct virtio_device *vdev) | 466 | static void vp_del_vqs(struct virtio_device *vdev) |
491 | { | 467 | { |
468 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
492 | struct virtqueue *vq, *n; | 469 | struct virtqueue *vq, *n; |
470 | struct virtio_pci_vq_info *info; | ||
493 | 471 | ||
494 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) | 472 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) { |
473 | info = vq->priv; | ||
474 | if (vp_dev->per_vq_vectors) | ||
475 | free_irq(vp_dev->msix_entries[info->vector].vector, vq); | ||
495 | vp_del_vq(vq); | 476 | vp_del_vq(vq); |
477 | } | ||
478 | vp_dev->per_vq_vectors = false; | ||
496 | 479 | ||
497 | vp_free_vectors(vdev); | 480 | vp_free_vectors(vdev); |
498 | } | 481 | } |
499 | 482 | ||
500 | /* the config->find_vqs() implementation */ | 483 | static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
501 | static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, | 484 | struct virtqueue *vqs[], |
502 | struct virtqueue *vqs[], | 485 | vq_callback_t *callbacks[], |
503 | vq_callback_t *callbacks[], | 486 | const char *names[], |
504 | const char *names[]) | 487 | int nvectors, |
488 | bool per_vq_vectors) | ||
505 | { | 489 | { |
506 | int vectors = 0; | 490 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
507 | int i, err; | 491 | u16 vector; |
508 | 492 | int i, err, allocated_vectors; | |
509 | /* How many vectors would we like? */ | ||
510 | for (i = 0; i < nvqs; ++i) | ||
511 | if (callbacks[i]) | ||
512 | ++vectors; | ||
513 | 493 | ||
514 | err = vp_request_vectors(vdev, vectors); | 494 | err = vp_request_vectors(vdev, nvectors, per_vq_vectors); |
515 | if (err) | 495 | if (err) |
516 | goto error_request; | 496 | goto error_request; |
517 | 497 | ||
498 | vp_dev->per_vq_vectors = per_vq_vectors; | ||
499 | allocated_vectors = vp_dev->msix_used_vectors; | ||
518 | for (i = 0; i < nvqs; ++i) { | 500 | for (i = 0; i < nvqs; ++i) { |
519 | vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]); | 501 | if (!callbacks[i] || !vp_dev->msix_enabled) |
520 | if (IS_ERR(vqs[i])) | 502 | vector = VIRTIO_MSI_NO_VECTOR; |
503 | else if (vp_dev->per_vq_vectors) | ||
504 | vector = allocated_vectors++; | ||
505 | else | ||
506 | vector = VP_MSIX_VQ_VECTOR; | ||
507 | vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i], vector); | ||
508 | if (IS_ERR(vqs[i])) { | ||
509 | err = PTR_ERR(vqs[i]); | ||
521 | goto error_find; | 510 | goto error_find; |
511 | } | ||
512 | /* allocate per-vq irq if available and necessary */ | ||
513 | if (vp_dev->per_vq_vectors && vector != VIRTIO_MSI_NO_VECTOR) { | ||
514 | snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names, | ||
515 | "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); | ||
516 | err = request_irq(vp_dev->msix_entries[vector].vector, | ||
517 | vring_interrupt, 0, | ||
518 | vp_dev->msix_names[vector], vqs[i]); | ||
519 | if (err) { | ||
520 | vp_del_vq(vqs[i]); | ||
521 | goto error_find; | ||
522 | } | ||
523 | } | ||
522 | } | 524 | } |
523 | return 0; | 525 | return 0; |
524 | 526 | ||
@@ -526,7 +528,37 @@ error_find: | |||
526 | vp_del_vqs(vdev); | 528 | vp_del_vqs(vdev); |
527 | 529 | ||
528 | error_request: | 530 | error_request: |
529 | return PTR_ERR(vqs[i]); | 531 | return err; |
532 | } | ||
533 | |||
534 | /* the config->find_vqs() implementation */ | ||
535 | static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, | ||
536 | struct virtqueue *vqs[], | ||
537 | vq_callback_t *callbacks[], | ||
538 | const char *names[]) | ||
539 | { | ||
540 | int vectors = 0; | ||
541 | int i, uninitialized_var(err); | ||
542 | |||
543 | /* How many vectors would we like? */ | ||
544 | for (i = 0; i < nvqs; ++i) | ||
545 | if (callbacks[i]) | ||
546 | ++vectors; | ||
547 | |||
548 | /* We want at most one vector per queue and one for config changes. */ | ||
549 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, | ||
550 | vectors + 1, true); | ||
551 | if (!err) | ||
552 | return 0; | ||
553 | /* Fallback to separate vectors for config and a shared for queues. */ | ||
554 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, | ||
555 | 2, false); | ||
556 | if (!err) | ||
557 | return 0; | ||
558 | /* Finally fall back to regular interrupts. */ | ||
559 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, | ||
560 | 0, false); | ||
561 | return err; | ||
530 | } | 562 | } |
531 | 563 | ||
532 | static struct virtio_config_ops virtio_pci_config_ops = { | 564 | static struct virtio_config_ops virtio_pci_config_ops = { |
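
The refactor above is easiest to read as a policy/mechanism split: vp_try_to_find_vqs() does the allocation and cleanup, while vp_find_vqs() only walks a three-step ladder of vector budgets. A compressed sketch of that ladder, with a hypothetical try_alloc(nvectors, per_vq) standing in for vp_try_to_find_vqs():

    #include <stdbool.h>

    static int try_alloc(unsigned nvectors, bool per_vq);	/* stand-in, not a real symbol */

    static int find_vqs_ladder(unsigned callbacks_wanted)
    {
    	/* 1. Best case: one MSI-X vector per callback, plus one for config changes. */
    	if (!try_alloc(callbacks_wanted + 1, true))
    		return 0;
    	/* 2. Two vectors total: one for config, one shared by all virtqueues. */
    	if (!try_alloc(2, false))
    		return 0;
    	/* 3. No MSI-X at all: fall back to the regular shared interrupt. */
    	return try_alloc(0, false);
    }

Each failed attempt cleans up after itself (vp_del_vqs() on the error_find path), which is what makes retrying with a smaller budget safe.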
diff --git a/include/linux/lguest.h b/include/linux/lguest.h index dbf2479e808e..2fb1dcbcb5aa 100644 --- a/include/linux/lguest.h +++ b/include/linux/lguest.h | |||
@@ -1,5 +1,7 @@ | |||
1 | /* Things the lguest guest needs to know. Note: like all lguest interfaces, | 1 | /* |
2 | * this is subject to wild and random change between versions. */ | 2 | * Things the lguest guest needs to know. Note: like all lguest interfaces, |
3 | * this is subject to wild and random change between versions. | ||
4 | */ | ||
3 | #ifndef _LINUX_LGUEST_H | 5 | #ifndef _LINUX_LGUEST_H |
4 | #define _LINUX_LGUEST_H | 6 | #define _LINUX_LGUEST_H |
5 | 7 | ||
@@ -11,32 +13,41 @@ | |||
11 | #define LG_CLOCK_MIN_DELTA 100UL | 13 | #define LG_CLOCK_MIN_DELTA 100UL |
12 | #define LG_CLOCK_MAX_DELTA ULONG_MAX | 14 | #define LG_CLOCK_MAX_DELTA ULONG_MAX |
13 | 15 | ||
14 | /*G:031 The second method of communicating with the Host is via "struct | 16 | /*G:031 |
17 | * The second method of communicating with the Host is via "struct | ||
15 | * lguest_data". Once the Guest's initialization hypercall tells the Host where | 18 | * lguest_data". Once the Guest's initialization hypercall tells the Host where |
16 | * this is, the Guest and Host both publish information in it. :*/ | 19 | * this is, the Guest and Host both publish information in it. |
17 | struct lguest_data | 20 | :*/ |
18 | { | 21 | struct lguest_data { |
19 | /* 512 == enabled (same as eflags in normal hardware). The Guest | 22 | /* |
20 | * changes interrupts so often that a hypercall is too slow. */ | 23 | * 512 == enabled (same as eflags in normal hardware). The Guest |
24 | * changes interrupts so often that a hypercall is too slow. | ||
25 | */ | ||
21 | unsigned int irq_enabled; | 26 | unsigned int irq_enabled; |
22 | /* Fine-grained interrupt disabling by the Guest */ | 27 | /* Fine-grained interrupt disabling by the Guest */ |
23 | DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); | 28 | DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); |
24 | 29 | ||
25 | /* The Host writes the virtual address of the last page fault here, | 30 | /* |
31 | * The Host writes the virtual address of the last page fault here, | ||
26 | * which saves the Guest a hypercall. CR2 is the native register where | 32 | * which saves the Guest a hypercall. CR2 is the native register where |
27 | * this address would normally be found. */ | 33 | * this address would normally be found. |
34 | */ | ||
28 | unsigned long cr2; | 35 | unsigned long cr2; |
29 | 36 | ||
30 | /* Wallclock time set by the Host. */ | 37 | /* Wallclock time set by the Host. */ |
31 | struct timespec time; | 38 | struct timespec time; |
32 | 39 | ||
33 | /* Interrupt pending set by the Host. The Guest should do a hypercall | 40 | /* |
34 | * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */ | 41 | * Interrupt pending set by the Host. The Guest should do a hypercall |
42 | * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). | ||
43 | */ | ||
35 | int irq_pending; | 44 | int irq_pending; |
36 | 45 | ||
37 | /* Async hypercall ring. Instead of directly making hypercalls, we can | 46 | /* |
47 | * Async hypercall ring. Instead of directly making hypercalls, we can | ||
38 | * place them in here for processing the next time the Host wants. | 48 | * place them in here for processing the next time the Host wants. |
39 | * This batching can be quite efficient. */ | 49 | * This batching can be quite efficient. |
50 | */ | ||
40 | 51 | ||
41 | /* 0xFF == done (set by Host), 0 == pending (set by Guest). */ | 52 | /* 0xFF == done (set by Host), 0 == pending (set by Guest). */ |
42 | u8 hcall_status[LHCALL_RING_SIZE]; | 53 | u8 hcall_status[LHCALL_RING_SIZE]; |
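
The irq_enabled/irq_pending pair above encodes a cheap interrupt-enable path: flip the published flag, and only trap into the Host if it flagged a pending interrupt in the meantime. A sketch of that protocol, assuming a guest-side hcall() wrapper and a send-interrupts request number, neither of which is quoted from this patch:

    static void guest_irq_enable(void)
    {
    	/* Cheap path: publish the flag instead of making a hypercall. */
    	lguest_data.irq_enabled = 512;	/* same value as X86_EFLAGS_IF */
    	wmb();				/* publish before reading irq_pending */
    	/* The Host queued an interrupt while we were blocked: go get it. */
    	if (lguest_data.irq_pending)
    		hcall(LHCALL_SEND_INTERRUPTS, 0, 0, 0);	/* assumed request */
    }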
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index bfefbdf7498a..495203ff221c 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h | |||
@@ -29,8 +29,10 @@ struct lguest_device_desc { | |||
29 | __u8 type; | 29 | __u8 type; |
30 | /* The number of virtqueues (first in config array) */ | 30 | /* The number of virtqueues (first in config array) */ |
31 | __u8 num_vq; | 31 | __u8 num_vq; |
32 | /* The number of bytes of feature bits. Multiply by 2: one for host | 32 | /* |
33 | * features and one for Guest acknowledgements. */ | 33 | * The number of bytes of feature bits. Multiply by 2: one for host |
34 | * features and one for Guest acknowledgements. | ||
35 | */ | ||
34 | __u8 feature_len; | 36 | __u8 feature_len; |
35 | /* The number of bytes of the config array after virtqueues. */ | 37 | /* The number of bytes of the config array after virtqueues. */ |
36 | __u8 config_len; | 38 | __u8 config_len; |
@@ -39,8 +41,10 @@ struct lguest_device_desc { | |||
39 | __u8 config[0]; | 41 | __u8 config[0]; |
40 | }; | 42 | }; |
41 | 43 | ||
42 | /*D:135 This is how we expect the device configuration field for a virtqueue | 44 | /*D:135 |
43 | * to be laid out in config space. */ | 45 | * This is how we expect the device configuration field for a virtqueue |
46 | * to be laid out in config space. | ||
47 | */ | ||
44 | struct lguest_vqconfig { | 48 | struct lguest_vqconfig { |
45 | /* The number of entries in the virtio_ring */ | 49 | /* The number of entries in the virtio_ring */ |
46 | __u16 num; | 50 | __u16 num; |
@@ -61,7 +65,9 @@ enum lguest_req | |||
61 | LHREQ_EVENTFD, /* + address, fd. */ | 65 | LHREQ_EVENTFD, /* + address, fd. */ |
62 | }; | 66 | }; |
63 | 67 | ||
64 | /* The alignment to use between consumer and producer parts of vring. | 68 | /* |
65 | * x86 pagesize for historical reasons. */ | 69 | * The alignment to use between consumer and producer parts of vring. |
70 | * x86 pagesize for historical reasons. | ||
71 | */ | ||
66 | #define LGUEST_VRING_ALIGN 4096 | 72 | #define LGUEST_VRING_ALIGN 4096 |
67 | #endif /* _LINUX_LGUEST_LAUNCHER */ | 73 | #endif /* _LINUX_LGUEST_LAUNCHER */ |
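
LGUEST_VRING_ALIGN is the value a launcher passes to the stock vring helpers so the producer (avail) and consumer (used) halves land on separate pages. A minimal sketch of sizing and initializing a ring with it; the queue size is arbitrary, and a real launcher would carve this out of guest memory rather than calloc():

    #include <stdlib.h>
    #include "linux/virtio_ring.h"
    #include "linux/lguest_launcher.h"

    static struct vring example_ring(unsigned int num)	/* num: e.g. 256 */
    {
    	struct vring vr;
    	/* Descriptor table + avail ring, padded to 4096, then the used ring. */
    	void *mem = calloc(1, vring_size(num, LGUEST_VRING_ALIGN));

    	vring_init(&vr, num, mem, LGUEST_VRING_ALIGN);
    	/* vr.desc, vr.avail and vr.used now all point into mem. */
    	return vr;
    }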
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index be7d255fc7cf..8dab9f2b8832 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h | |||
@@ -20,8 +20,7 @@ | |||
20 | 20 | ||
21 | #define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ | 21 | #define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ |
22 | 22 | ||
23 | struct virtio_blk_config | 23 | struct virtio_blk_config { |
24 | { | ||
25 | /* The capacity (in 512-byte sectors). */ | 24 | /* The capacity (in 512-byte sectors). */ |
26 | __u64 capacity; | 25 | __u64 capacity; |
27 | /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */ | 26 | /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */ |
@@ -50,8 +49,7 @@ struct virtio_blk_config | |||
50 | #define VIRTIO_BLK_T_BARRIER 0x80000000 | 49 | #define VIRTIO_BLK_T_BARRIER 0x80000000 |
51 | 50 | ||
52 | /* This is the first element of the read scatter-gather list. */ | 51 | /* This is the first element of the read scatter-gather list. */ |
53 | struct virtio_blk_outhdr | 52 | struct virtio_blk_outhdr { |
54 | { | ||
55 | /* VIRTIO_BLK_T* */ | 53 | /* VIRTIO_BLK_T* */ |
56 | __u32 type; | 54 | __u32 type; |
57 | /* io priority. */ | 55 | /* io priority. */ |
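
For reference, a block request starts with this out-header, followed by the data buffers and a status byte. A sketch of a single-sector read; note the .sector field belongs to the same struct even though it falls outside the excerpt above:

    struct virtio_blk_outhdr hdr = {
    	.type   = VIRTIO_BLK_T_IN,	/* a read; VIRTIO_BLK_T_OUT is a write */
    	.ioprio = 0,			/* io priority: no preference */
    	.sector = (1024 * 1024) / 512,	/* byte offset 1 MiB, in 512-byte sectors */
    };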
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 99f514575f6a..e547e3c8ee9a 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
@@ -79,8 +79,7 @@ | |||
79 | * the dev->feature bits if it wants. | 79 | * the dev->feature bits if it wants. |
80 | */ | 80 | */ |
81 | typedef void vq_callback_t(struct virtqueue *); | 81 | typedef void vq_callback_t(struct virtqueue *); |
82 | struct virtio_config_ops | 82 | struct virtio_config_ops { |
83 | { | ||
84 | void (*get)(struct virtio_device *vdev, unsigned offset, | 83 | void (*get)(struct virtio_device *vdev, unsigned offset, |
85 | void *buf, unsigned len); | 84 | void *buf, unsigned len); |
86 | void (*set)(struct virtio_device *vdev, unsigned offset, | 85 | void (*set)(struct virtio_device *vdev, unsigned offset, |
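
The driver-facing contract that find_vqs (and the vp_find_vqs() implementation earlier in this diff) must honour is a single call with parallel arrays. A sketch of a caller; the callback bodies and queue names are invented:

    static void rx_done(struct virtqueue *vq) { /* drain received buffers */ }
    static void tx_done(struct virtqueue *vq) { /* reclaim sent buffers */ }

    static int example_setup_vqs(struct virtio_device *vdev)
    {
    	struct virtqueue *vqs[2];
    	vq_callback_t *callbacks[] = { rx_done, tx_done };
    	const char *names[] = { "rx", "tx" };

    	/* The transport chooses per-vq MSI-X, shared MSI-X, or INTx behind this. */
    	return vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
    }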
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 9c543d6ac535..d8dd539c9f48 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
@@ -31,8 +31,7 @@ | |||
31 | 31 | ||
32 | #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ | 32 | #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ |
33 | 33 | ||
34 | struct virtio_net_config | 34 | struct virtio_net_config { |
35 | { | ||
36 | /* The config defining mac address (if VIRTIO_NET_F_MAC) */ | 35 | /* The config defining mac address (if VIRTIO_NET_F_MAC) */ |
37 | __u8 mac[6]; | 36 | __u8 mac[6]; |
38 | /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ | 37 | /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ |
@@ -41,8 +40,7 @@ struct virtio_net_config | |||
41 | 40 | ||
42 | /* This is the first element of the scatter-gather list. If you don't | 41 | /* This is the first element of the scatter-gather list. If you don't |
43 | * specify GSO or CSUM features, you can simply ignore the header. */ | 42 | * specify GSO or CSUM features, you can simply ignore the header. */ |
44 | struct virtio_net_hdr | 43 | struct virtio_net_hdr { |
45 | { | ||
46 | #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset | 44 | #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset |
47 | __u8 flags; | 45 | __u8 flags; |
48 | #define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame | 46 | #define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame |
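
A sketch of the degenerate header the comment above is about: with neither checksum offload nor GSO negotiated, every field says "nothing to do", which is why a receiver may ignore it. The gso_type field is part of the same struct though it sits outside the excerpt:

    struct virtio_net_hdr hdr = {
    	.flags    = 0,				/* no VIRTIO_NET_HDR_F_NEEDS_CSUM */
    	.gso_type = VIRTIO_NET_HDR_GSO_NONE,	/* not a GSO frame */
    };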
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 693e0ec5afa6..e4d144b132b5 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
@@ -30,8 +30,7 @@ | |||
30 | #define VIRTIO_RING_F_INDIRECT_DESC 28 | 30 | #define VIRTIO_RING_F_INDIRECT_DESC 28 |
31 | 31 | ||
32 | /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ | 32 | /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ |
33 | struct vring_desc | 33 | struct vring_desc { |
34 | { | ||
35 | /* Address (guest-physical). */ | 34 | /* Address (guest-physical). */ |
36 | __u64 addr; | 35 | __u64 addr; |
37 | /* Length. */ | 36 | /* Length. */ |
@@ -42,24 +41,21 @@ struct vring_desc | |||
42 | __u16 next; | 41 | __u16 next; |
43 | }; | 42 | }; |
44 | 43 | ||
45 | struct vring_avail | 44 | struct vring_avail { |
46 | { | ||
47 | __u16 flags; | 45 | __u16 flags; |
48 | __u16 idx; | 46 | __u16 idx; |
49 | __u16 ring[]; | 47 | __u16 ring[]; |
50 | }; | 48 | }; |
51 | 49 | ||
52 | /* u32 is used here for ids for padding reasons. */ | 50 | /* u32 is used here for ids for padding reasons. */ |
53 | struct vring_used_elem | 51 | struct vring_used_elem { |
54 | { | ||
55 | /* Index of start of used descriptor chain. */ | 52 | /* Index of start of used descriptor chain. */ |
56 | __u32 id; | 53 | __u32 id; |
57 | /* Total length of the descriptor chain which was used (written to) */ | 54 | /* Total length of the descriptor chain which was used (written to) */ |
58 | __u32 len; | 55 | __u32 len; |
59 | }; | 56 | }; |
60 | 57 | ||
61 | struct vring_used | 58 | struct vring_used { |
62 | { | ||
63 | __u16 flags; | 59 | __u16 flags; |
64 | __u16 idx; | 60 | __u16 idx; |
65 | struct vring_used_elem ring[]; | 61 | struct vring_used_elem ring[]; |
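
To make the chaining comment concrete, a sketch of a two-descriptor chain and the used-ring entry it produces; the addresses are invented guest-physical values, and VRING_DESC_F_NEXT / VRING_DESC_F_WRITE are the standard flag bits from this header:

    struct vring_desc chain[2] = {
    	/* Buffer the device reads; the flag says "continues at .next". */
    	{ .addr = 0x10000, .len = 512, .flags = VRING_DESC_F_NEXT, .next = 1 },
    	/* One byte the device writes back; no F_NEXT, so the chain ends. */
    	{ .addr = 0x20000, .len = 1, .flags = VRING_DESC_F_WRITE },
    };
    /* On completion the device publishes { .id = 0, .len = 1 } in the used ring. */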