Diffstat (limited to 'Documentation/lguest/lguest.c')
 -rw-r--r--  Documentation/lguest/lguest.c | 721
 1 file changed, 483 insertions, 238 deletions
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 9ebcd6ef361b..950cde6d6e58 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
| @@ -1,7 +1,9 @@ | |||
| 1 | /*P:100 This is the Launcher code, a simple program which lays out the | 1 | /*P:100 |
| 2 | * "physical" memory for the new Guest by mapping the kernel image and | 2 | * This is the Launcher code, a simple program which lays out the "physical" |
| 3 | * the virtual devices, then opens /dev/lguest to tell the kernel | 3 | * memory for the new Guest by mapping the kernel image and the virtual |
| 4 | * about the Guest and control it. :*/ | 4 | * devices, then opens /dev/lguest to tell the kernel about the Guest and |
| 5 | * control it. | ||
| 6 | :*/ | ||
| 5 | #define _LARGEFILE64_SOURCE | 7 | #define _LARGEFILE64_SOURCE |
| 6 | #define _GNU_SOURCE | 8 | #define _GNU_SOURCE |
| 7 | #include <stdio.h> | 9 | #include <stdio.h> |
| @@ -46,13 +48,15 @@ | |||
| 46 | #include "linux/virtio_rng.h" | 48 | #include "linux/virtio_rng.h" |
| 47 | #include "linux/virtio_ring.h" | 49 | #include "linux/virtio_ring.h" |
| 48 | #include "asm/bootparam.h" | 50 | #include "asm/bootparam.h" |
| 49 | /*L:110 We can ignore the 39 include files we need for this program, but I do | 51 | /*L:110 |
| 50 | * want to draw attention to the use of kernel-style types. | 52 | * We can ignore the 42 include files we need for this program, but I do want |
| 53 | * to draw attention to the use of kernel-style types. | ||
| 51 | * | 54 | * |
| 52 | * As Linus said, "C is a Spartan language, and so should your naming be." I | 55 | * As Linus said, "C is a Spartan language, and so should your naming be." I |
| 53 | * like these abbreviations, so we define them here. Note that u64 is always | 56 | * like these abbreviations, so we define them here. Note that u64 is always |
| 54 | * unsigned long long, which works on all Linux systems: this means that we can | 57 | * unsigned long long, which works on all Linux systems: this means that we can |
| 55 | * use %llu in printf for any u64. */ | 58 | * use %llu in printf for any u64. |
| 59 | */ | ||
| 56 | typedef unsigned long long u64; | 60 | typedef unsigned long long u64; |
| 57 | typedef uint32_t u32; | 61 | typedef uint32_t u32; |
| 58 | typedef uint16_t u16; | 62 | typedef uint16_t u16; |
| @@ -69,8 +73,10 @@ typedef uint8_t u8; | |||
| 69 | /* This will occupy 3 pages: it must be a power of 2. */ | 73 | /* This will occupy 3 pages: it must be a power of 2. */ |
| 70 | #define VIRTQUEUE_NUM 256 | 74 | #define VIRTQUEUE_NUM 256 |
| 71 | 75 | ||
| 72 | /*L:120 verbose is both a global flag and a macro. The C preprocessor allows | 76 | /*L:120 |
| 73 | * this, and although I wouldn't recommend it, it works quite nicely here. */ | 77 | * verbose is both a global flag and a macro. The C preprocessor allows |
| 78 | * this, and although I wouldn't recommend it, it works quite nicely here. | ||
| 79 | */ | ||
| 74 | static bool verbose; | 80 | static bool verbose; |
| 75 | #define verbose(args...) \ | 81 | #define verbose(args...) \ |
| 76 | do { if (verbose) printf(args); } while(0) | 82 | do { if (verbose) printf(args); } while(0) |
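To see why this works, here's the trick reduced to a standalone sketch (illustrative only, not part of the patch): the preprocessor only expands verbose when it is followed by a parenthesis, so the bare identifier still names the bool.

#include <stdbool.h>
#include <stdio.h>

static bool verbose;
#define verbose(args...) \
	do { if (verbose) printf(args); } while(0)

int main(void)
{
	verbose = true;                  /* bare identifier: the flag */
	verbose("mapped %d pages\n", 3); /* followed by '(': the macro */
	return 0;
}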
| @@ -87,8 +93,7 @@ static int lguest_fd; | |||
| 87 | static unsigned int __thread cpu_id; | 93 | static unsigned int __thread cpu_id; |
| 88 | 94 | ||
| 89 | /* This is our list of devices. */ | 95 | /* This is our list of devices. */ |
| 90 | struct device_list | 96 | struct device_list { |
| 91 | { | ||
| 92 | /* Counter to assign interrupt numbers. */ | 97 | /* Counter to assign interrupt numbers. */ |
| 93 | unsigned int next_irq; | 98 | unsigned int next_irq; |
| 94 | 99 | ||
| @@ -100,8 +105,7 @@ struct device_list | |||
| 100 | 105 | ||
| 101 | /* A single linked list of devices. */ | 106 | /* A single linked list of devices. */ |
| 102 | struct device *dev; | 107 | struct device *dev; |
| 103 | /* And a pointer to the last device for easy append and also for | 108 | /* And a pointer to the last device for easy append. */ |
| 104 | * configuration appending. */ | ||
| 105 | struct device *lastdev; | 109 | struct device *lastdev; |
| 106 | }; | 110 | }; |
| 107 | 111 | ||
| @@ -109,8 +113,7 @@ struct device_list | |||
| 109 | static struct device_list devices; | 113 | static struct device_list devices; |
| 110 | 114 | ||
| 111 | /* The device structure describes a single device. */ | 115 | /* The device structure describes a single device. */ |
| 112 | struct device | 116 | struct device { |
| 113 | { | ||
| 114 | /* The linked-list pointer. */ | 117 | /* The linked-list pointer. */ |
| 115 | struct device *next; | 118 | struct device *next; |
| 116 | 119 | ||
| @@ -135,8 +138,7 @@ struct device | |||
| 135 | }; | 138 | }; |
| 136 | 139 | ||
| 137 | /* The virtqueue structure describes a queue attached to a device. */ | 140 | /* The virtqueue structure describes a queue attached to a device. */ |
| 138 | struct virtqueue | 141 | struct virtqueue { |
| 139 | { | ||
| 140 | struct virtqueue *next; | 142 | struct virtqueue *next; |
| 141 | 143 | ||
| 142 | /* Which device owns me. */ | 144 | /* Which device owns me. */ |
| @@ -168,20 +170,24 @@ static char **main_args; | |||
| 168 | /* The original tty settings to restore on exit. */ | 170 | /* The original tty settings to restore on exit. */ |
| 169 | static struct termios orig_term; | 171 | static struct termios orig_term; |
| 170 | 172 | ||
| 171 | /* We have to be careful with barriers: our devices are all run in separate | 173 | /* |
| 174 | * We have to be careful with barriers: our devices are all run in separate | ||
| 172 | * threads and so we need to make sure that changes visible to the Guest happen | 175 | * threads and so we need to make sure that changes visible to the Guest happen |
| 173 | * in precise order. */ | 176 | * in precise order. |
| 177 | */ | ||
| 174 | #define wmb() __asm__ __volatile__("" : : : "memory") | 178 | #define wmb() __asm__ __volatile__("" : : : "memory") |
| 175 | #define mb() __asm__ __volatile__("" : : : "memory") | 179 | #define mb() __asm__ __volatile__("" : : : "memory") |
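A quick sketch of why the write barrier matters (illustrative names only): whoever produces entries for a shared ring must make the entry visible before it publishes the index that tells the other side to look.

static unsigned int ring[16];
static unsigned int ring_idx;

static void publish(unsigned int value)
{
	ring[ring_idx % 16] = value;
	wmb();		/* the entry must be visible before the index moves */
	ring_idx++;
}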
| 176 | 180 | ||
| 177 | /* Convert an iovec element to the given type. | 181 | /* |
| 182 | * Convert an iovec element to the given type. | ||
| 178 | * | 183 | * |
| 179 | * This is a fairly ugly trick: we need to know the size of the type and | 184 | * This is a fairly ugly trick: we need to know the size of the type and |
| 180 | * alignment requirement to check the pointer is kosher. It's also nice to | 185 | * alignment requirement to check the pointer is kosher. It's also nice to |
| 181 | * have the name of the type in case we report failure. | 186 | * have the name of the type in case we report failure. |
| 182 | * | 187 | * |
| 183 | * Typing those three things all the time is cumbersome and error prone, so we | 188 | * Typing those three things all the time is cumbersome and error prone, so we |
| 184 | * have a macro which sets them all up and passes to the real function. */ | 189 | * have a macro which sets them all up and passes to the real function. |
| 190 | */ | ||
| 185 | #define convert(iov, type) \ | 191 | #define convert(iov, type) \ |
| 186 | ((type *)_convert((iov), sizeof(type), __alignof__(type), #type)) | 192 | ((type *)_convert((iov), sizeof(type), __alignof__(type), #type)) |
| 187 | 193 | ||
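The hunk below only shows _convert()'s signature; a minimal body consistent with the comment above would be something like this (a sketch, not a quote of the real function):

static void *_convert(struct iovec *iov, size_t size, size_t align,
		      const char *name)
{
	/* The buffer must be exactly the expected size and properly aligned. */
	if (iov->iov_len != size)
		errx(1, "Bad iovec size %zu for %s", iov->iov_len, name);
	if ((unsigned long)iov->iov_base % align != 0)
		errx(1, "Bad alignment %p for %s", iov->iov_base, name);
	return iov->iov_base;
}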
| @@ -198,8 +204,10 @@ static void *_convert(struct iovec *iov, size_t size, size_t align, | |||
| 198 | /* Wrapper for the last available index. Makes it easier to change. */ | 204 | /* Wrapper for the last available index. Makes it easier to change. */ |
| 199 | #define lg_last_avail(vq) ((vq)->last_avail_idx) | 205 | #define lg_last_avail(vq) ((vq)->last_avail_idx) |
| 200 | 206 | ||
| 201 | /* The virtio configuration space is defined to be little-endian. x86 is | 207 | /* |
| 202 | * little-endian too, but it's nice to be explicit so we have these helpers. */ | 208 | * The virtio configuration space is defined to be little-endian. x86 is |
| 209 | * little-endian too, but it's nice to be explicit so we have these helpers. | ||
| 210 | */ | ||
| 203 | #define cpu_to_le16(v16) (v16) | 211 | #define cpu_to_le16(v16) (v16) |
| 204 | #define cpu_to_le32(v32) (v32) | 212 | #define cpu_to_le32(v32) (v32) |
| 205 | #define cpu_to_le64(v64) (v64) | 213 | #define cpu_to_le64(v64) (v64) |
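These are identity macros only because the Launcher runs on little-endian x86. Purely for contrast (not part of the file), a big-endian host would have to byte-swap, for example with the GCC builtins:

#define cpu_to_le16(v16) __builtin_bswap16(v16)
#define cpu_to_le32(v32) __builtin_bswap32(v32)
#define cpu_to_le64(v64) __builtin_bswap64(v64)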
| @@ -241,11 +249,12 @@ static u8 *get_feature_bits(struct device *dev) | |||
| 241 | + dev->num_vq * sizeof(struct lguest_vqconfig); | 249 | + dev->num_vq * sizeof(struct lguest_vqconfig); |
| 242 | } | 250 | } |
| 243 | 251 | ||
| 244 | /*L:100 The Launcher code itself takes us out into userspace, that scary place | 252 | /*L:100 |
| 245 | * where pointers run wild and free! Unfortunately, like most userspace | 253 | * The Launcher code itself takes us out into userspace, that scary place where |
| 246 | * programs, it's quite boring (which is why everyone likes to hack on the | 254 | * pointers run wild and free! Unfortunately, like most userspace programs, |
| 247 | * kernel!). Perhaps if you make up an Lguest Drinking Game at this point, it | 255 | * it's quite boring (which is why everyone likes to hack on the kernel!). |
| 248 | * will get you through this section. Or, maybe not. | 256 | * Perhaps if you make up an Lguest Drinking Game at this point, it will get |
| 257 | * you through this section. Or, maybe not. | ||
| 249 | * | 258 | * |
| 250 | * The Launcher sets up a big chunk of memory to be the Guest's "physical" | 259 | * The Launcher sets up a big chunk of memory to be the Guest's "physical" |
| 251 | * memory and stores it in "guest_base". In other words, Guest physical == | 260 | * memory and stores it in "guest_base". In other words, Guest physical == |
| @@ -253,7 +262,8 @@ static u8 *get_feature_bits(struct device *dev) | |||
| 253 | * | 262 | * |
| 254 | * This can be tough to get your head around, but usually it just means that we | 263 | * This can be tough to get your head around, but usually it just means that we |
| 255 | * use these trivial conversion functions when the Guest gives us its | 264 | * use these trivial conversion functions when the Guest gives us its |
| 256 | * "physical" addresses: */ | 265 | * "physical" addresses: |
| 266 | */ | ||
| 257 | static void *from_guest_phys(unsigned long addr) | 267 | static void *from_guest_phys(unsigned long addr) |
| 258 | { | 268 | { |
| 259 | return guest_base + addr; | 269 | return guest_base + addr; |
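The next hunk header names the helper going the other way; its body isn't shown here, but it is presumably just the inverse subtraction, along these lines:

static unsigned long to_guest_phys(const void *addr)
{
	/* The mirror image of from_guest_phys() above. */
	return (addr - guest_base);
}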
| @@ -268,7 +278,8 @@ static unsigned long to_guest_phys(const void *addr) | |||
| 268 | * Loading the Kernel. | 278 | * Loading the Kernel. |
| 269 | * | 279 | * |
| 270 | * We start with a couple of simple helper routines. open_or_die() avoids | 280 | * We start with a couple of simple helper routines. open_or_die() avoids |
| 271 | * error-checking code cluttering the callers: */ | 281 | * error-checking code cluttering the callers: |
| 282 | */ | ||
| 272 | static int open_or_die(const char *name, int flags) | 283 | static int open_or_die(const char *name, int flags) |
| 273 | { | 284 | { |
| 274 | int fd = open(name, flags); | 285 | int fd = open(name, flags); |
| @@ -283,12 +294,19 @@ static void *map_zeroed_pages(unsigned int num) | |||
| 283 | int fd = open_or_die("/dev/zero", O_RDONLY); | 294 | int fd = open_or_die("/dev/zero", O_RDONLY); |
| 284 | void *addr; | 295 | void *addr; |
| 285 | 296 | ||
| 286 | /* We use a private mapping (ie. if we write to the page, it will be | 297 | /* |
| 287 | * copied). */ | 298 | * We use a private mapping (ie. if we write to the page, it will be |
| 299 | * copied). | ||
| 300 | */ | ||
| 288 | addr = mmap(NULL, getpagesize() * num, | 301 | addr = mmap(NULL, getpagesize() * num, |
| 289 | PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0); | 302 | PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0); |
| 290 | if (addr == MAP_FAILED) | 303 | if (addr == MAP_FAILED) |
| 291 | err(1, "Mmaping %u pages of /dev/zero", num); | 304 | err(1, "Mmaping %u pages of /dev/zero", num); |
| 305 | |||
| 306 | /* | ||
| 307 | * One neat mmap feature is that you can close the fd, and it | ||
| 308 | * stays mapped. | ||
| 309 | */ | ||
| 292 | close(fd); | 310 | close(fd); |
| 293 | 311 | ||
| 294 | return addr; | 312 | return addr; |
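Incidentally (an aside, not a change to the file), the same zeroed private mapping can be had without opening /dev/zero at all by asking for anonymous memory:

addr = mmap(NULL, getpagesize() * num, PROT_READ|PROT_WRITE|PROT_EXEC,
	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED)
	err(1, "Mmaping %u anonymous pages", num);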
| @@ -305,20 +323,24 @@ static void *get_pages(unsigned int num) | |||
| 305 | return addr; | 323 | return addr; |
| 306 | } | 324 | } |
| 307 | 325 | ||
| 308 | /* This routine is used to load the kernel or initrd. It tries mmap, but if | 326 | /* |
| 327 | * This routine is used to load the kernel or initrd. It tries mmap, but if | ||
| 309 | * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries), | 328 | * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries), |
| 310 | * it falls back to reading the memory in. */ | 329 | * it falls back to reading the memory in. |
| 330 | */ | ||
| 311 | static void map_at(int fd, void *addr, unsigned long offset, unsigned long len) | 331 | static void map_at(int fd, void *addr, unsigned long offset, unsigned long len) |
| 312 | { | 332 | { |
| 313 | ssize_t r; | 333 | ssize_t r; |
| 314 | 334 | ||
| 315 | /* We map writable even though some segments are marked read-only. | 335 | /* |
| 336 | * We map writable even though some segments are marked read-only. | ||
| 316 | * The kernel really wants to be writable: it patches its own | 337 | * The kernel really wants to be writable: it patches its own |
| 317 | * instructions. | 338 | * instructions. |
| 318 | * | 339 | * |
| 319 | * MAP_PRIVATE means that the page won't be copied until a write is | 340 | * MAP_PRIVATE means that the page won't be copied until a write is |
| 320 | * done to it. This allows us to share untouched memory between | 341 | * done to it. This allows us to share untouched memory between |
| 321 | * Guests. */ | 342 | * Guests. |
| 343 | */ | ||
| 322 | if (mmap(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC, | 344 | if (mmap(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC, |
| 323 | MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED) | 345 | MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED) |
| 324 | return; | 346 | return; |
| @@ -329,7 +351,8 @@ static void map_at(int fd, void *addr, unsigned long offset, unsigned long len) | |||
| 329 | err(1, "Reading offset %lu len %lu gave %zi", offset, len, r); | 351 | err(1, "Reading offset %lu len %lu gave %zi", offset, len, r); |
| 330 | } | 352 | } |
| 331 | 353 | ||
| 332 | /* This routine takes an open vmlinux image, which is in ELF, and maps it into | 354 | /* |
| 355 | * This routine takes an open vmlinux image, which is in ELF, and maps it into | ||
| 333 | * the Guest memory. ELF = Embedded Linking Format, which is the format used | 356 | * the Guest memory. ELF = Embedded Linking Format, which is the format used |
| 334 | * by all modern binaries on Linux including the kernel. | 357 | * by all modern binaries on Linux including the kernel. |
| 335 | * | 358 | * |
| @@ -337,23 +360,28 @@ static void map_at(int fd, void *addr, unsigned long offset, unsigned long len) | |||
| 337 | * address. We use the physical address; the Guest will map itself to the | 360 | * address. We use the physical address; the Guest will map itself to the |
| 338 | * virtual address. | 361 | * virtual address. |
| 339 | * | 362 | * |
| 340 | * We return the starting address. */ | 363 | * We return the starting address. |
| 364 | */ | ||
| 341 | static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr) | 365 | static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr) |
| 342 | { | 366 | { |
| 343 | Elf32_Phdr phdr[ehdr->e_phnum]; | 367 | Elf32_Phdr phdr[ehdr->e_phnum]; |
| 344 | unsigned int i; | 368 | unsigned int i; |
| 345 | 369 | ||
| 346 | /* Sanity checks on the main ELF header: an x86 executable with a | 370 | /* |
| 347 | * reasonable number of correctly-sized program headers. */ | 371 | * Sanity checks on the main ELF header: an x86 executable with a |
| 372 | * reasonable number of correctly-sized program headers. | ||
| 373 | */ | ||
| 348 | if (ehdr->e_type != ET_EXEC | 374 | if (ehdr->e_type != ET_EXEC |
| 349 | || ehdr->e_machine != EM_386 | 375 | || ehdr->e_machine != EM_386 |
| 350 | || ehdr->e_phentsize != sizeof(Elf32_Phdr) | 376 | || ehdr->e_phentsize != sizeof(Elf32_Phdr) |
| 351 | || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr)) | 377 | || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr)) |
| 352 | errx(1, "Malformed elf header"); | 378 | errx(1, "Malformed elf header"); |
| 353 | 379 | ||
| 354 | /* An ELF executable contains an ELF header and a number of "program" | 380 | /* |
| 381 | * An ELF executable contains an ELF header and a number of "program" | ||
| 355 | * headers which indicate which parts ("segments") of the program to | 382 | * headers which indicate which parts ("segments") of the program to |
| 356 | * load where. */ | 383 | * load where. |
| 384 | */ | ||
| 357 | 385 | ||
| 358 | /* We read in all the program headers at once: */ | 386 | /* We read in all the program headers at once: */ |
| 359 | if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0) | 387 | if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0) |
| @@ -361,8 +389,10 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr) | |||
| 361 | if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr)) | 389 | if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr)) |
| 362 | err(1, "Reading program headers"); | 390 | err(1, "Reading program headers"); |
| 363 | 391 | ||
| 364 | /* Try all the headers: there are usually only three. A read-only one, | 392 | /* |
| 365 | * a read-write one, and a "note" section which we don't load. */ | 393 | * Try all the headers: there are usually only three. A read-only one, |
| 394 | * a read-write one, and a "note" section which we don't load. | ||
| 395 | */ | ||
| 366 | for (i = 0; i < ehdr->e_phnum; i++) { | 396 | for (i = 0; i < ehdr->e_phnum; i++) { |
| 367 | /* If this isn't a loadable segment, we ignore it */ | 397 | /* If this isn't a loadable segment, we ignore it */ |
| 368 | if (phdr[i].p_type != PT_LOAD) | 398 | if (phdr[i].p_type != PT_LOAD) |
| @@ -380,13 +410,15 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr) | |||
| 380 | return ehdr->e_entry; | 410 | return ehdr->e_entry; |
| 381 | } | 411 | } |
| 382 | 412 | ||
| 383 | /*L:150 A bzImage, unlike an ELF file, is not meant to be loaded. You're | 413 | /*L:150 |
| 384 | * supposed to jump into it and it will unpack itself. We used to have to | 414 | * A bzImage, unlike an ELF file, is not meant to be loaded. You're supposed |
| 385 | * perform some hairy magic because the unpacking code scared me. | 415 | * to jump into it and it will unpack itself. We used to have to perform some |
| 416 | * hairy magic because the unpacking code scared me. | ||
| 386 | * | 417 | * |
| 387 | * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote | 418 | * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote |
| 388 | * a small patch to jump over the tricky bits in the Guest, so now we just read | 419 | * a small patch to jump over the tricky bits in the Guest, so now we just read |
| 389 | * the funky header so we know where in the file to load, and away we go! */ | 420 | * the funky header so we know where in the file to load, and away we go! |
| 421 | */ | ||
| 390 | static unsigned long load_bzimage(int fd) | 422 | static unsigned long load_bzimage(int fd) |
| 391 | { | 423 | { |
| 392 | struct boot_params boot; | 424 | struct boot_params boot; |
| @@ -394,8 +426,10 @@ static unsigned long load_bzimage(int fd) | |||
| 394 | /* Modern bzImages get loaded at 1M. */ | 426 | /* Modern bzImages get loaded at 1M. */ |
| 395 | void *p = from_guest_phys(0x100000); | 427 | void *p = from_guest_phys(0x100000); |
| 396 | 428 | ||
| 397 | /* Go back to the start of the file and read the header. It should be | 429 | /* |
| 398 | * a Linux boot header (see Documentation/x86/i386/boot.txt) */ | 430 | * Go back to the start of the file and read the header. It should be |
| 431 | * a Linux boot header (see Documentation/x86/i386/boot.txt) | ||
| 432 | */ | ||
| 399 | lseek(fd, 0, SEEK_SET); | 433 | lseek(fd, 0, SEEK_SET); |
| 400 | read(fd, &boot, sizeof(boot)); | 434 | read(fd, &boot, sizeof(boot)); |
| 401 | 435 | ||
| @@ -414,9 +448,11 @@ static unsigned long load_bzimage(int fd) | |||
| 414 | return boot.hdr.code32_start; | 448 | return boot.hdr.code32_start; |
| 415 | } | 449 | } |
| 416 | 450 | ||
| 417 | /*L:140 Loading the kernel is easy when it's a "vmlinux", but most kernels | 451 | /*L:140 |
| 452 | * Loading the kernel is easy when it's a "vmlinux", but most kernels | ||
| 418 | * come wrapped up in the self-decompressing "bzImage" format. With a little | 453 | * come wrapped up in the self-decompressing "bzImage" format. With a little |
| 419 | * work, we can load those, too. */ | 454 | * work, we can load those, too. |
| 455 | */ | ||
| 420 | static unsigned long load_kernel(int fd) | 456 | static unsigned long load_kernel(int fd) |
| 421 | { | 457 | { |
| 422 | Elf32_Ehdr hdr; | 458 | Elf32_Ehdr hdr; |
| @@ -433,24 +469,28 @@ static unsigned long load_kernel(int fd) | |||
| 433 | return load_bzimage(fd); | 469 | return load_bzimage(fd); |
| 434 | } | 470 | } |
| 435 | 471 | ||
| 436 | /* This is a trivial little helper to align pages. Andi Kleen hated it because | 472 | /* |
| 473 | * This is a trivial little helper to align pages. Andi Kleen hated it because | ||
| 437 | * it calls getpagesize() twice: "it's dumb code." | 474 | * it calls getpagesize() twice: "it's dumb code." |
| 438 | * | 475 | * |
| 439 | * Kernel guys get really het up about optimization, even when it's not | 476 | * Kernel guys get really het up about optimization, even when it's not |
| 440 | * necessary. I leave this code as a reaction against that. */ | 477 | * necessary. I leave this code as a reaction against that. |
| 478 | */ | ||
| 441 | static inline unsigned long page_align(unsigned long addr) | 479 | static inline unsigned long page_align(unsigned long addr) |
| 442 | { | 480 | { |
| 443 | /* Add upwards and truncate downwards. */ | 481 | /* Add upwards and truncate downwards. */ |
| 444 | return ((addr + getpagesize()-1) & ~(getpagesize()-1)); | 482 | return ((addr + getpagesize()-1) & ~(getpagesize()-1)); |
| 445 | } | 483 | } |
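A quick worked example of that mask arithmetic, assuming 4096-byte pages:

/* (0x1234 + 0xfff) & ~0xfff == 0x2000; already-aligned values are unchanged. */
unsigned long a = page_align(0x1234);	/* 0x2000 */
unsigned long b = page_align(0x2000);	/* 0x2000 */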
| 446 | 484 | ||
| 447 | /*L:180 An "initial ram disk" is a disk image loaded into memory along with | 485 | /*L:180 |
| 448 | * the kernel which the kernel can use to boot from without needing any | 486 | * An "initial ram disk" is a disk image loaded into memory along with the |
| 449 | * drivers. Most distributions now use this as standard: the initrd contains | 487 | * kernel which the kernel can use to boot from without needing any drivers. |
| 450 | * the code to load the appropriate driver modules for the current machine. | 488 | * Most distributions now use this as standard: the initrd contains the code to |
| 489 | * load the appropriate driver modules for the current machine. | ||
| 451 | * | 490 | * |
| 452 | * Importantly, James Morris works for RedHat, and Fedora uses initrds for its | 491 | * Importantly, James Morris works for RedHat, and Fedora uses initrds for its |
| 453 | * kernels. He sent me this (and tells me when I break it). */ | 492 | * kernels. He sent me this (and tells me when I break it). |
| 493 | */ | ||
| 454 | static unsigned long load_initrd(const char *name, unsigned long mem) | 494 | static unsigned long load_initrd(const char *name, unsigned long mem) |
| 455 | { | 495 | { |
| 456 | int ifd; | 496 | int ifd; |
| @@ -462,12 +502,16 @@ static unsigned long load_initrd(const char *name, unsigned long mem) | |||
| 462 | if (fstat(ifd, &st) < 0) | 502 | if (fstat(ifd, &st) < 0) |
| 463 | err(1, "fstat() on initrd '%s'", name); | 503 | err(1, "fstat() on initrd '%s'", name); |
| 464 | 504 | ||
| 465 | /* We map the initrd at the top of memory, but mmap wants it to be | 505 | /* |
| 466 | * page-aligned, so we round the size up for that. */ | 506 | * We map the initrd at the top of memory, but mmap wants it to be |
| 507 | * page-aligned, so we round the size up for that. | ||
| 508 | */ | ||
| 467 | len = page_align(st.st_size); | 509 | len = page_align(st.st_size); |
| 468 | map_at(ifd, from_guest_phys(mem - len), 0, st.st_size); | 510 | map_at(ifd, from_guest_phys(mem - len), 0, st.st_size); |
| 469 | /* Once a file is mapped, you can close the file descriptor. It's a | 511 | /* |
| 470 | * little odd, but quite useful. */ | 512 | * Once a file is mapped, you can close the file descriptor. It's a |
| 513 | * little odd, but quite useful. | ||
| 514 | */ | ||
| 471 | close(ifd); | 515 | close(ifd); |
| 472 | verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len); | 516 | verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len); |
| 473 | 517 | ||
| @@ -476,8 +520,10 @@ static unsigned long load_initrd(const char *name, unsigned long mem) | |||
| 476 | } | 520 | } |
| 477 | /*:*/ | 521 | /*:*/ |
| 478 | 522 | ||
| 479 | /* Simple routine to roll all the commandline arguments together with spaces | 523 | /* |
| 480 | * between them. */ | 524 | * Simple routine to roll all the commandline arguments together with spaces |
| 525 | * between them. | ||
| 526 | */ | ||
| 481 | static void concat(char *dst, char *args[]) | 527 | static void concat(char *dst, char *args[]) |
| 482 | { | 528 | { |
| 483 | unsigned int i, len = 0; | 529 | unsigned int i, len = 0; |
| @@ -494,10 +540,12 @@ static void concat(char *dst, char *args[]) | |||
| 494 | dst[len] = '\0'; | 540 | dst[len] = '\0'; |
| 495 | } | 541 | } |
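For illustration (hypothetical values), this is how a kernel command line gets built from a NULL-terminated argument vector:

char cmdline[4096];	/* hypothetical buffer size */
char *args[] = { "root=/dev/vda1", "console=hvc0", NULL };

concat(cmdline, args);
/* cmdline now holds the arguments joined with spaces. */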
| 496 | 542 | ||
| 497 | /*L:185 This is where we actually tell the kernel to initialize the Guest. We | 543 | /*L:185 |
| 544 | * This is where we actually tell the kernel to initialize the Guest. We | ||
| 498 | * saw the arguments it expects when we looked at initialize() in lguest_user.c: | 545 | * saw the arguments it expects when we looked at initialize() in lguest_user.c: |
| 499 | * the base of Guest "physical" memory, the top physical page to allow and the | 546 | * the base of Guest "physical" memory, the top physical page to allow and the |
| 500 | * entry point for the Guest. */ | 547 | * entry point for the Guest. |
| 548 | */ | ||
| 501 | static void tell_kernel(unsigned long start) | 549 | static void tell_kernel(unsigned long start) |
| 502 | { | 550 | { |
| 503 | unsigned long args[] = { LHREQ_INITIALIZE, | 551 | unsigned long args[] = { LHREQ_INITIALIZE, |
| @@ -511,7 +559,7 @@ static void tell_kernel(unsigned long start) | |||
| 511 | } | 559 | } |
| 512 | /*:*/ | 560 | /*:*/ |
| 513 | 561 | ||
| 514 | /* | 562 | /*L:200 |
| 515 | * Device Handling. | 563 | * Device Handling. |
| 516 | * | 564 | * |
| 517 | * When the Guest gives us a buffer, it sends an array of addresses and sizes. | 565 | * When the Guest gives us a buffer, it sends an array of addresses and sizes. |
| @@ -522,20 +570,26 @@ static void tell_kernel(unsigned long start) | |||
| 522 | static void *_check_pointer(unsigned long addr, unsigned int size, | 570 | static void *_check_pointer(unsigned long addr, unsigned int size, |
| 523 | unsigned int line) | 571 | unsigned int line) |
| 524 | { | 572 | { |
| 525 | /* We have to separately check addr and addr+size, because size could | 573 | /* |
| 526 | * be huge and addr + size might wrap around. */ | 574 | * We have to separately check addr and addr+size, because size could |
| 575 | * be huge and addr + size might wrap around. | ||
| 576 | */ | ||
| 527 | if (addr >= guest_limit || addr + size >= guest_limit) | 577 | if (addr >= guest_limit || addr + size >= guest_limit) |
| 528 | errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr); | 578 | errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr); |
| 529 | /* We return a pointer for the caller's convenience, now we know it's | 579 | /* |
| 530 | * safe to use. */ | 580 | * We return a pointer for the caller's convenience, now we know it's |
| 581 | * safe to use. | ||
| 582 | */ | ||
| 531 | return from_guest_phys(addr); | 583 | return from_guest_phys(addr); |
| 532 | } | 584 | } |
| 533 | /* A macro which transparently hands the line number to the real function. */ | 585 | /* A macro which transparently hands the line number to the real function. */ |
| 534 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) | 586 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) |
| 535 | 587 | ||
| 536 | /* Each buffer in the virtqueues is actually a chain of descriptors. This | 588 | /* |
| 589 | * Each buffer in the virtqueues is actually a chain of descriptors. This | ||
| 537 | * function returns the next descriptor in the chain, or vq->vring.num if we're | 590 | * function returns the next descriptor in the chain, or vq->vring.num if we're |
| 538 | * at the end. */ | 591 | * at the end. |
| 592 | */ | ||
| 539 | static unsigned next_desc(struct vring_desc *desc, | 593 | static unsigned next_desc(struct vring_desc *desc, |
| 540 | unsigned int i, unsigned int max) | 594 | unsigned int i, unsigned int max) |
| 541 | { | 595 | { |
| @@ -556,7 +610,10 @@ static unsigned next_desc(struct vring_desc *desc, | |||
| 556 | return next; | 610 | return next; |
| 557 | } | 611 | } |
| 558 | 612 | ||
| 559 | /* This actually sends the interrupt for this virtqueue */ | 613 | /* |
| 614 | * This actually sends the interrupt for this virtqueue, if we've used a | ||
| 615 | * buffer. | ||
| 616 | */ | ||
| 560 | static void trigger_irq(struct virtqueue *vq) | 617 | static void trigger_irq(struct virtqueue *vq) |
| 561 | { | 618 | { |
| 562 | unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; | 619 | unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; |
| @@ -576,12 +633,14 @@ static void trigger_irq(struct virtqueue *vq) | |||
| 576 | err(1, "Triggering irq %i", vq->config.irq); | 633 | err(1, "Triggering irq %i", vq->config.irq); |
| 577 | } | 634 | } |
| 578 | 635 | ||
| 579 | /* This looks in the virtqueue and for the first available buffer, and converts | 636 | /* |
| 637 | * This looks in the virtqueue for the first available buffer, and converts | ||
| 580 | * it to an iovec for convenient access. Since descriptors consist of some | 638 | * it to an iovec for convenient access. Since descriptors consist of some |
| 581 | * number of output then some number of input descriptors, it's actually two | 639 | * number of output then some number of input descriptors, it's actually two |
| 582 | * iovecs, but we pack them into one and note how many of each there were. | 640 | * iovecs, but we pack them into one and note how many of each there were. |
| 583 | * | 641 | * |
| 584 | * This function returns the descriptor number found. */ | 642 | * This function waits if necessary, and returns the descriptor number found. |
| 643 | */ | ||
| 585 | static unsigned wait_for_vq_desc(struct virtqueue *vq, | 644 | static unsigned wait_for_vq_desc(struct virtqueue *vq, |
| 586 | struct iovec iov[], | 645 | struct iovec iov[], |
| 587 | unsigned int *out_num, unsigned int *in_num) | 646 | unsigned int *out_num, unsigned int *in_num) |
| @@ -590,17 +649,23 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
| 590 | struct vring_desc *desc; | 649 | struct vring_desc *desc; |
| 591 | u16 last_avail = lg_last_avail(vq); | 650 | u16 last_avail = lg_last_avail(vq); |
| 592 | 651 | ||
| 652 | /* There's nothing available? */ | ||
| 593 | while (last_avail == vq->vring.avail->idx) { | 653 | while (last_avail == vq->vring.avail->idx) { |
| 594 | u64 event; | 654 | u64 event; |
| 595 | 655 | ||
| 596 | /* OK, tell Guest about progress up to now. */ | 656 | /* |
| 657 | * Since we're about to sleep, now is a good time to tell the | ||
| 658 | * Guest about what we've used up to now. | ||
| 659 | */ | ||
| 597 | trigger_irq(vq); | 660 | trigger_irq(vq); |
| 598 | 661 | ||
| 599 | /* OK, now we need to know about added descriptors. */ | 662 | /* OK, now we need to know about added descriptors. */ |
| 600 | vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY; | 663 | vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY; |
| 601 | 664 | ||
| 602 | /* They could have slipped one in as we were doing that: make | 665 | /* |
| 603 | * sure it's written, then check again. */ | 666 | * They could have slipped one in as we were doing that: make |
| 667 | * sure it's written, then check again. | ||
| 668 | */ | ||
| 604 | mb(); | 669 | mb(); |
| 605 | if (last_avail != vq->vring.avail->idx) { | 670 | if (last_avail != vq->vring.avail->idx) { |
| 606 | vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY; | 671 | vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY; |
| @@ -620,8 +685,10 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
| 620 | errx(1, "Guest moved used index from %u to %u", | 685 | errx(1, "Guest moved used index from %u to %u", |
| 621 | last_avail, vq->vring.avail->idx); | 686 | last_avail, vq->vring.avail->idx); |
| 622 | 687 | ||
| 623 | /* Grab the next descriptor number they're advertising, and increment | 688 | /* |
| 624 | * the index we've seen. */ | 689 | * Grab the next descriptor number they're advertising, and increment |
| 690 | * the index we've seen. | ||
| 691 | */ | ||
| 625 | head = vq->vring.avail->ring[last_avail % vq->vring.num]; | 692 | head = vq->vring.avail->ring[last_avail % vq->vring.num]; |
| 626 | lg_last_avail(vq)++; | 693 | lg_last_avail(vq)++; |
| 627 | 694 | ||
| @@ -636,8 +703,10 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
| 636 | desc = vq->vring.desc; | 703 | desc = vq->vring.desc; |
| 637 | i = head; | 704 | i = head; |
| 638 | 705 | ||
| 639 | /* If this is an indirect entry, then this buffer contains a descriptor | 706 | /* |
| 640 | * table which we handle as if it's any normal descriptor chain. */ | 707 | * If this is an indirect entry, then this buffer contains a descriptor |
| 708 | * table which we handle as if it's any normal descriptor chain. | ||
| 709 | */ | ||
| 641 | if (desc[i].flags & VRING_DESC_F_INDIRECT) { | 710 | if (desc[i].flags & VRING_DESC_F_INDIRECT) { |
| 642 | if (desc[i].len % sizeof(struct vring_desc)) | 711 | if (desc[i].len % sizeof(struct vring_desc)) |
| 643 | errx(1, "Invalid size for indirect buffer table"); | 712 | errx(1, "Invalid size for indirect buffer table"); |
| @@ -656,8 +725,10 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
| 656 | if (desc[i].flags & VRING_DESC_F_WRITE) | 725 | if (desc[i].flags & VRING_DESC_F_WRITE) |
| 657 | (*in_num)++; | 726 | (*in_num)++; |
| 658 | else { | 727 | else { |
| 659 | /* If it's an output descriptor, they're all supposed | 728 | /* |
| 660 | * to come before any input descriptors. */ | 729 | * If it's an output descriptor, they're all supposed |
| 730 | * to come before any input descriptors. | ||
| 731 | */ | ||
| 661 | if (*in_num) | 732 | if (*in_num) |
| 662 | errx(1, "Descriptor has out after in"); | 733 | errx(1, "Descriptor has out after in"); |
| 663 | (*out_num)++; | 734 | (*out_num)++; |
| @@ -671,14 +742,19 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
| 671 | return head; | 742 | return head; |
| 672 | } | 743 | } |
| 673 | 744 | ||
| 674 | /* After we've used one of their buffers, we tell them about it. We'll then | 745 | /* |
| 675 | * want to send them an interrupt, using trigger_irq(). */ | 746 | * After we've used one of their buffers, we tell the Guest about it. Sometime |
| 747 | * later we'll want to send them an interrupt using trigger_irq(); note that | ||
| 748 | * wait_for_vq_desc() does that for us if it has to wait. | ||
| 749 | */ | ||
| 676 | static void add_used(struct virtqueue *vq, unsigned int head, int len) | 750 | static void add_used(struct virtqueue *vq, unsigned int head, int len) |
| 677 | { | 751 | { |
| 678 | struct vring_used_elem *used; | 752 | struct vring_used_elem *used; |
| 679 | 753 | ||
| 680 | /* The virtqueue contains a ring of used buffers. Get a pointer to the | 754 | /* |
| 681 | * next entry in that used ring. */ | 755 | * The virtqueue contains a ring of used buffers. Get a pointer to the |
| 756 | * next entry in that used ring. | ||
| 757 | */ | ||
| 682 | used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; | 758 | used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; |
| 683 | used->id = head; | 759 | used->id = head; |
| 684 | used->len = len; | 760 | used->len = len; |
| @@ -698,9 +774,9 @@ static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len) | |||
| 698 | /* | 774 | /* |
| 699 | * The Console | 775 | * The Console |
| 700 | * | 776 | * |
| 701 | * We associate some data with the console for our exit hack. */ | 777 | * We associate some data with the console for our exit hack. |
| 702 | struct console_abort | 778 | */ |
| 703 | { | 779 | struct console_abort { |
| 704 | /* How many times have they hit ^C? */ | 780 | /* How many times have they hit ^C? */ |
| 705 | int count; | 781 | int count; |
| 706 | /* When did they start? */ | 782 | /* When did they start? */ |
| @@ -715,30 +791,35 @@ static void console_input(struct virtqueue *vq) | |||
| 715 | struct console_abort *abort = vq->dev->priv; | 791 | struct console_abort *abort = vq->dev->priv; |
| 716 | struct iovec iov[vq->vring.num]; | 792 | struct iovec iov[vq->vring.num]; |
| 717 | 793 | ||
| 718 | /* Make sure there's a descriptor waiting. */ | 794 | /* Make sure there's a descriptor available. */ |
| 719 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); | 795 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); |
| 720 | if (out_num) | 796 | if (out_num) |
| 721 | errx(1, "Output buffers in console in queue?"); | 797 | errx(1, "Output buffers in console in queue?"); |
| 722 | 798 | ||
| 723 | /* Read it in. */ | 799 | /* Read into it. This is where we usually wait. */ |
| 724 | len = readv(STDIN_FILENO, iov, in_num); | 800 | len = readv(STDIN_FILENO, iov, in_num); |
| 725 | if (len <= 0) { | 801 | if (len <= 0) { |
| 726 | /* Ran out of input? */ | 802 | /* Ran out of input? */ |
| 727 | warnx("Failed to get console input, ignoring console."); | 803 | warnx("Failed to get console input, ignoring console."); |
| 728 | /* For simplicity, dying threads kill the whole Launcher. So | 804 | /* |
| 729 | * just nap here. */ | 805 | * For simplicity, dying threads kill the whole Launcher. So |
| 806 | * just nap here. | ||
| 807 | */ | ||
| 730 | for (;;) | 808 | for (;;) |
| 731 | pause(); | 809 | pause(); |
| 732 | } | 810 | } |
| 733 | 811 | ||
| 812 | /* Tell the Guest we used a buffer. */ | ||
| 734 | add_used_and_trigger(vq, head, len); | 813 | add_used_and_trigger(vq, head, len); |
| 735 | 814 | ||
| 736 | /* Three ^C within one second? Exit. | 815 | /* |
| 816 | * Three ^C within one second? Exit. | ||
| 737 | * | 817 | * |
| 738 | * This is such a hack, but works surprisingly well. Each ^C has to | 818 | * This is such a hack, but works surprisingly well. Each ^C has to |
| 739 | * be in a buffer by itself, so they can't be too fast. But we check | 819 | * be in a buffer by itself, so they can't be too fast. But we check |
| 740 | * that we get three within about a second, so they can't be too | 820 | * that we get three within about a second, so they can't be too |
| 741 | * slow. */ | 821 | * slow. |
| 822 | */ | ||
| 742 | if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) { | 823 | if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) { |
| 743 | abort->count = 0; | 824 | abort->count = 0; |
| 744 | return; | 825 | return; |
| @@ -763,15 +844,23 @@ static void console_output(struct virtqueue *vq) | |||
| 763 | unsigned int head, out, in; | 844 | unsigned int head, out, in; |
| 764 | struct iovec iov[vq->vring.num]; | 845 | struct iovec iov[vq->vring.num]; |
| 765 | 846 | ||
| 847 | /* We usually wait in here, for the Guest to give us something. */ | ||
| 766 | head = wait_for_vq_desc(vq, iov, &out, &in); | 848 | head = wait_for_vq_desc(vq, iov, &out, &in); |
| 767 | if (in) | 849 | if (in) |
| 768 | errx(1, "Input buffers in console output queue?"); | 850 | errx(1, "Input buffers in console output queue?"); |
| 851 | |||
| 852 | /* writev can return a partial write, so we loop here. */ | ||
| 769 | while (!iov_empty(iov, out)) { | 853 | while (!iov_empty(iov, out)) { |
| 770 | int len = writev(STDOUT_FILENO, iov, out); | 854 | int len = writev(STDOUT_FILENO, iov, out); |
| 771 | if (len <= 0) | 855 | if (len <= 0) |
| 772 | err(1, "Write to stdout gave %i", len); | 856 | err(1, "Write to stdout gave %i", len); |
| 773 | iov_consume(iov, out, len); | 857 | iov_consume(iov, out, len); |
| 774 | } | 858 | } |
| 859 | |||
| 860 | /* | ||
| 861 | * We're finished with that buffer: if we're going to sleep, | ||
| 862 | * wait_for_vq_desc() will prod the Guest with an interrupt. | ||
| 863 | */ | ||
| 775 | add_used(vq, head, 0); | 864 | add_used(vq, head, 0); |
| 776 | } | 865 | } |
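iov_empty() and iov_consume() aren't part of this hunk. A minimal iov_consume() consistent with the loop above might look like this (an assumption about the helper, not a quote of it): it just advances the iovec array past bytes that have already been written.

static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
{
	unsigned int i;

	for (i = 0; i < num_iov; i++) {
		size_t used = iov[i].iov_len < len ? iov[i].iov_len : len;

		iov[i].iov_base = (char *)iov[i].iov_base + used;
		iov[i].iov_len -= used;
		len -= used;
	}
	if (len != 0)
		errx(1, "iovec too short!");
}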
| 777 | 866 | ||
| @@ -791,15 +880,30 @@ static void net_output(struct virtqueue *vq) | |||
| 791 | unsigned int head, out, in; | 880 | unsigned int head, out, in; |
| 792 | struct iovec iov[vq->vring.num]; | 881 | struct iovec iov[vq->vring.num]; |
| 793 | 882 | ||
| 883 | /* We usually wait in here for the Guest to give us a packet. */ | ||
| 794 | head = wait_for_vq_desc(vq, iov, &out, &in); | 884 | head = wait_for_vq_desc(vq, iov, &out, &in); |
| 795 | if (in) | 885 | if (in) |
| 796 | errx(1, "Input buffers in net output queue?"); | 886 | errx(1, "Input buffers in net output queue?"); |
| 887 | /* | ||
| 888 | * Send the whole thing through to /dev/net/tun. It expects the exact | ||
| 889 | * same format: what a coincidence! | ||
| 890 | */ | ||
| 797 | if (writev(net_info->tunfd, iov, out) < 0) | 891 | if (writev(net_info->tunfd, iov, out) < 0) |
| 798 | errx(1, "Write to tun failed?"); | 892 | errx(1, "Write to tun failed?"); |
| 893 | |||
| 894 | /* | ||
| 895 | * Done with that one; wait_for_vq_desc() will send the interrupt if | ||
| 896 | * all packets are processed. | ||
| 897 | */ | ||
| 799 | add_used(vq, head, 0); | 898 | add_used(vq, head, 0); |
| 800 | } | 899 | } |
| 801 | 900 | ||
| 802 | /* Will reading from this file descriptor block? */ | 901 | /* |
| 902 | * Handling network input is a bit trickier, because I've tried to optimize it. | ||
| 903 | * | ||
| 904 | * First we have a helper routine which tells us whether reading from this | ||
| 905 | * file descriptor (ie. the /dev/net/tun device) will block: | ||
| 906 | */ | ||
| 803 | static bool will_block(int fd) | 907 | static bool will_block(int fd) |
| 804 | { | 908 | { |
| 805 | fd_set fdset; | 909 | fd_set fdset; |
| @@ -809,8 +913,11 @@ static bool will_block(int fd) | |||
| 809 | return select(fd+1, &fdset, NULL, NULL, &zero) != 1; | 913 | return select(fd+1, &fdset, NULL, NULL, &zero) != 1; |
| 810 | } | 914 | } |
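will_block() is just a readiness probe: select() with a zero timeout returns immediately. Purely as an illustration (assuming <poll.h> were included), the equivalent probe with poll() reads:

static bool will_block_poll(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* A timeout of 0 makes poll() return without waiting. */
	return poll(&pfd, 1, 0) != 1;
}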
| 811 | 915 | ||
| 812 | /* This is where we handle packets coming in from the tun device to our | 916 | /* |
| 813 | * Guest. */ | 917 | * This handles packets coming in from the tun device to our Guest. Like all |
| 918 | * service routines, it gets called again as soon as it returns, so you don't | ||
| 919 | * see a while(1) loop here. | ||
| 920 | */ | ||
| 814 | static void net_input(struct virtqueue *vq) | 921 | static void net_input(struct virtqueue *vq) |
| 815 | { | 922 | { |
| 816 | int len; | 923 | int len; |
| @@ -818,21 +925,38 @@ static void net_input(struct virtqueue *vq) | |||
| 818 | struct iovec iov[vq->vring.num]; | 925 | struct iovec iov[vq->vring.num]; |
| 819 | struct net_info *net_info = vq->dev->priv; | 926 | struct net_info *net_info = vq->dev->priv; |
| 820 | 927 | ||
| 928 | /* | ||
| 929 | * Get a descriptor to write an incoming packet into. This will also | ||
| 930 | * send an interrupt if they're out of descriptors. | ||
| 931 | */ | ||
| 821 | head = wait_for_vq_desc(vq, iov, &out, &in); | 932 | head = wait_for_vq_desc(vq, iov, &out, &in); |
| 822 | if (out) | 933 | if (out) |
| 823 | errx(1, "Output buffers in net input queue?"); | 934 | errx(1, "Output buffers in net input queue?"); |
| 824 | 935 | ||
| 825 | /* Deliver interrupt now, since we're about to sleep. */ | 936 | /* |
| 937 | * If it looks like we'll block reading from the tun device, send them | ||
| 938 | * an interrupt. | ||
| 939 | */ | ||
| 826 | if (vq->pending_used && will_block(net_info->tunfd)) | 940 | if (vq->pending_used && will_block(net_info->tunfd)) |
| 827 | trigger_irq(vq); | 941 | trigger_irq(vq); |
| 828 | 942 | ||
| 943 | /* | ||
| 944 | * Read in the packet. This is where we normally wait (when there's no | ||
| 945 | * incoming network traffic). | ||
| 946 | */ | ||
| 829 | len = readv(net_info->tunfd, iov, in); | 947 | len = readv(net_info->tunfd, iov, in); |
| 830 | if (len <= 0) | 948 | if (len <= 0) |
| 831 | err(1, "Failed to read from tun."); | 949 | err(1, "Failed to read from tun."); |
| 950 | |||
| 951 | /* | ||
| 952 | * Mark that packet buffer as used, but don't interrupt here. We want | ||
| 953 | * to wait until we've done as much work as we can. | ||
| 954 | */ | ||
| 832 | add_used(vq, head, len); | 955 | add_used(vq, head, len); |
| 833 | } | 956 | } |
| 957 | /*:*/ | ||
| 834 | 958 | ||
| 835 | /* This is the helper to create threads. */ | 959 | /* This is the helper to create threads: run the service routine in a loop. */ |
| 836 | static int do_thread(void *_vq) | 960 | static int do_thread(void *_vq) |
| 837 | { | 961 | { |
| 838 | struct virtqueue *vq = _vq; | 962 | struct virtqueue *vq = _vq; |
| @@ -842,8 +966,10 @@ static int do_thread(void *_vq) | |||
| 842 | return 0; | 966 | return 0; |
| 843 | } | 967 | } |
| 844 | 968 | ||
| 845 | /* When a child dies, we kill our entire process group with SIGTERM. This | 969 | /* |
| 846 | * also has the side effect that the shell restores the console for us! */ | 970 | * When a child dies, we kill our entire process group with SIGTERM. This |
| 971 | * also has the side effect that the shell restores the console for us! | ||
| 972 | */ | ||
| 847 | static void kill_launcher(int signal) | 973 | static void kill_launcher(int signal) |
| 848 | { | 974 | { |
| 849 | kill(0, SIGTERM); | 975 | kill(0, SIGTERM); |
| @@ -878,11 +1004,15 @@ static void reset_device(struct device *dev) | |||
| 878 | signal(SIGCHLD, (void *)kill_launcher); | 1004 | signal(SIGCHLD, (void *)kill_launcher); |
| 879 | } | 1005 | } |
| 880 | 1006 | ||
| 1007 | /*L:216 | ||
| 1008 | * This actually creates the thread which services the virtqueue for a device. | ||
| 1009 | */ | ||
| 881 | static void create_thread(struct virtqueue *vq) | 1010 | static void create_thread(struct virtqueue *vq) |
| 882 | { | 1011 | { |
| 883 | /* Create stack for thread and run it. Since stack grows | 1012 | /* |
| 884 | * downwards, we point the stack pointer to the end of this | 1013 | * Create stack for thread. Since the stack grows downwards, we point |
| 885 | * region. */ | 1014 | * the stack pointer to the end of this region. |
| 1015 | */ | ||
| 886 | char *stack = malloc(32768); | 1016 | char *stack = malloc(32768); |
| 887 | unsigned long args[] = { LHREQ_EVENTFD, | 1017 | unsigned long args[] = { LHREQ_EVENTFD, |
| 888 | vq->config.pfn*getpagesize(), 0 }; | 1018 | vq->config.pfn*getpagesize(), 0 }; |
| @@ -893,17 +1023,22 @@ static void create_thread(struct virtqueue *vq) | |||
| 893 | err(1, "Creating eventfd"); | 1023 | err(1, "Creating eventfd"); |
| 894 | args[2] = vq->eventfd; | 1024 | args[2] = vq->eventfd; |
| 895 | 1025 | ||
| 896 | /* Attach an eventfd to this virtqueue: it will go off | 1026 | /* |
| 897 | * when the Guest does an LHCALL_NOTIFY for this vq. */ | 1027 | * Attach an eventfd to this virtqueue: it will go off when the Guest |
| 1028 | * does an LHCALL_NOTIFY for this vq. | ||
| 1029 | */ | ||
| 898 | if (write(lguest_fd, &args, sizeof(args)) != 0) | 1030 | if (write(lguest_fd, &args, sizeof(args)) != 0) |
| 899 | err(1, "Attaching eventfd"); | 1031 | err(1, "Attaching eventfd"); |
| 900 | 1032 | ||
| 901 | /* CLONE_VM: because it has to access the Guest memory, and | 1033 | /* |
| 902 | * SIGCHLD so we get a signal if it dies. */ | 1034 | * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so |
| 1035 | * we get a signal if it dies. | ||
| 1036 | */ | ||
| 903 | vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq); | 1037 | vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq); |
| 904 | if (vq->thread == (pid_t)-1) | 1038 | if (vq->thread == (pid_t)-1) |
| 905 | err(1, "Creating clone"); | 1039 | err(1, "Creating clone"); |
| 906 | /* We close our local copy, now the child has it. */ | 1040 | |
| 1041 | /* We close our local copy now the child has it. */ | ||
| 907 | close(vq->eventfd); | 1042 | close(vq->eventfd); |
| 908 | } | 1043 | } |
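The clone() call above is the whole threading story. A standalone sketch of the same pattern (illustrative names, not lguest code) makes the stack arithmetic clearer:

static int worker(void *arg)
{
	/* ... the service loop would live here ... */
	return 0;
}

static int spawn_worker(void *arg)
{
	const size_t stack_size = 32768;
	char *stack = malloc(stack_size);

	if (!stack)
		err(1, "Allocating worker stack");
	/* The x86 stack grows down, so pass the *top* of the allocation. */
	return clone(worker, stack + stack_size, CLONE_VM | SIGCHLD, arg);
}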
| 909 | 1044 | ||
| @@ -955,7 +1090,10 @@ static void update_device_status(struct device *dev) | |||
| 955 | } | 1090 | } |
| 956 | } | 1091 | } |
| 957 | 1092 | ||
| 958 | /* This is the generic routine we call when the Guest uses LHCALL_NOTIFY. */ | 1093 | /*L:215 |
| 1094 | * This is the generic routine we call when the Guest uses LHCALL_NOTIFY. In | ||
| 1095 | * particular, it's used to notify us of device status changes during boot. | ||
| 1096 | */ | ||
| 959 | static void handle_output(unsigned long addr) | 1097 | static void handle_output(unsigned long addr) |
| 960 | { | 1098 | { |
| 961 | struct device *i; | 1099 | struct device *i; |
| @@ -964,25 +1102,42 @@ static void handle_output(unsigned long addr) | |||
| 964 | for (i = devices.dev; i; i = i->next) { | 1102 | for (i = devices.dev; i; i = i->next) { |
| 965 | struct virtqueue *vq; | 1103 | struct virtqueue *vq; |
| 966 | 1104 | ||
| 967 | /* Notifications to device descriptors update device status. */ | 1105 | /* |
| 1106 | * Notifications to device descriptors mean they updated the | ||
| 1107 | * device status. | ||
| 1108 | */ | ||
| 968 | if (from_guest_phys(addr) == i->desc) { | 1109 | if (from_guest_phys(addr) == i->desc) { |
| 969 | update_device_status(i); | 1110 | update_device_status(i); |
| 970 | return; | 1111 | return; |
| 971 | } | 1112 | } |
| 972 | 1113 | ||
| 973 | /* Devices *can* be used before status is set to DRIVER_OK. */ | 1114 | /* |
| 1115 | * Devices *can* be used before status is set to DRIVER_OK. | ||
| 1116 | * The original plan was that they would never do this: they | ||
| 1117 | * would always finish setting up their status bits before | ||
| 1118 | * actually touching the virtqueues. In practice, we allowed | ||
| 1119 | * them to, and they do (eg. the disk probes for partition | ||
| 1120 | * tables as part of initialization). | ||
| 1121 | * | ||
| 1122 | * If we see this, we start the device: once it's running, we | ||
| 1123 | * expect the device to catch all the notifications. | ||
| 1124 | */ | ||
| 974 | for (vq = i->vq; vq; vq = vq->next) { | 1125 | for (vq = i->vq; vq; vq = vq->next) { |
| 975 | if (addr != vq->config.pfn*getpagesize()) | 1126 | if (addr != vq->config.pfn*getpagesize()) |
| 976 | continue; | 1127 | continue; |
| 977 | if (i->running) | 1128 | if (i->running) |
| 978 | errx(1, "Notification on running %s", i->name); | 1129 | errx(1, "Notification on running %s", i->name); |
| 1130 | /* This just calls create_thread() for each virtqueue */ | ||
| 979 | start_device(i); | 1131 | start_device(i); |
| 980 | return; | 1132 | return; |
| 981 | } | 1133 | } |
| 982 | } | 1134 | } |
| 983 | 1135 | ||
| 984 | /* Early console write is done using notify on a nul-terminated string | 1136 | /* |
| 985 | * in Guest memory. */ | 1137 | * Early console write is done using notify on a nul-terminated string |
| 1138 | * in Guest memory. It's also great for hacking debugging messages | ||
| 1139 | * into a Guest. | ||
| 1140 | */ | ||
| 986 | if (addr >= guest_limit) | 1141 | if (addr >= guest_limit) |
| 987 | errx(1, "Bad NOTIFY %#lx", addr); | 1142 | errx(1, "Bad NOTIFY %#lx", addr); |
| 988 | 1143 | ||
| @@ -998,10 +1153,12 @@ static void handle_output(unsigned long addr) | |||
| 998 | * routines to allocate and manage them. | 1153 | * routines to allocate and manage them. |
| 999 | */ | 1154 | */ |
| 1000 | 1155 | ||
| 1001 | /* The layout of the device page is a "struct lguest_device_desc" followed by a | 1156 | /* |
| 1157 | * The layout of the device page is a "struct lguest_device_desc" followed by a | ||
| 1002 | * number of virtqueue descriptors, then two sets of feature bits, then an | 1158 | * number of virtqueue descriptors, then two sets of feature bits, then an |
| 1003 | * array of configuration bytes. This routine returns the configuration | 1159 | * array of configuration bytes. This routine returns the configuration |
| 1004 | * pointer. */ | 1160 | * pointer. |
| 1161 | */ | ||
| 1005 | static u8 *device_config(const struct device *dev) | 1162 | static u8 *device_config(const struct device *dev) |
| 1006 | { | 1163 | { |
| 1007 | return (void *)(dev->desc + 1) | 1164 | return (void *)(dev->desc + 1) |
| @@ -1009,9 +1166,11 @@ static u8 *device_config(const struct device *dev) | |||
| 1009 | + dev->feature_len * 2; | 1166 | + dev->feature_len * 2; |
| 1010 | } | 1167 | } |
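Putting get_feature_bits() and device_config() together, the device page they walk is laid out like this (a summary of the comment above, nothing more):

/*
 *   struct lguest_device_desc		<- dev->desc
 *   struct lguest_vqconfig [num_vq]	<- one per virtqueue
 *   u8 features[feature_len]		<- what we advertise
 *   u8 features[feature_len]		<- what the Guest accepts
 *   u8 config[config_len]		<- device configuration bytes
 */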
| 1011 | 1168 | ||
| 1012 | /* This routine allocates a new "struct lguest_device_desc" from descriptor | 1169 | /* |
| 1170 | * This routine allocates a new "struct lguest_device_desc" from the descriptor | ||
| 1013 | * table page just above the Guest's normal memory. It returns a pointer to | 1171 | * table page just above the Guest's normal memory. It returns a pointer to |
| 1014 | * that descriptor. */ | 1172 | * that descriptor. |
| 1173 | */ | ||
| 1015 | static struct lguest_device_desc *new_dev_desc(u16 type) | 1174 | static struct lguest_device_desc *new_dev_desc(u16 type) |
| 1016 | { | 1175 | { |
| 1017 | struct lguest_device_desc d = { .type = type }; | 1176 | struct lguest_device_desc d = { .type = type }; |
| @@ -1032,8 +1191,10 @@ static struct lguest_device_desc *new_dev_desc(u16 type) | |||
| 1032 | return memcpy(p, &d, sizeof(d)); | 1191 | return memcpy(p, &d, sizeof(d)); |
| 1033 | } | 1192 | } |
| 1034 | 1193 | ||
| 1035 | /* Each device descriptor is followed by the description of its virtqueues. We | 1194 | /* |
| 1036 | * specify how many descriptors the virtqueue is to have. */ | 1195 | * Each device descriptor is followed by the description of its virtqueues. We |
| 1196 | * specify how many descriptors the virtqueue is to have. | ||
| 1197 | */ | ||
| 1037 | static void add_virtqueue(struct device *dev, unsigned int num_descs, | 1198 | static void add_virtqueue(struct device *dev, unsigned int num_descs, |
| 1038 | void (*service)(struct virtqueue *)) | 1199 | void (*service)(struct virtqueue *)) |
| 1039 | { | 1200 | { |
| @@ -1050,6 +1211,11 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
| 1050 | vq->next = NULL; | 1211 | vq->next = NULL; |
| 1051 | vq->last_avail_idx = 0; | 1212 | vq->last_avail_idx = 0; |
| 1052 | vq->dev = dev; | 1213 | vq->dev = dev; |
| 1214 | |||
| 1215 | /* | ||
| 1216 | * This is the routine the service thread will run, and its Process ID | ||
| 1217 | * once it's running. | ||
| 1218 | */ | ||
| 1053 | vq->service = service; | 1219 | vq->service = service; |
| 1054 | vq->thread = (pid_t)-1; | 1220 | vq->thread = (pid_t)-1; |
| 1055 | 1221 | ||
| @@ -1061,10 +1227,12 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
| 1061 | /* Initialize the vring. */ | 1227 | /* Initialize the vring. */ |
| 1062 | vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN); | 1228 | vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN); |
| 1063 | 1229 | ||
| 1064 | /* Append virtqueue to this device's descriptor. We use | 1230 | /* |
| 1231 | * Append virtqueue to this device's descriptor. We use | ||
| 1065 | * device_config() to get the end of the device's current virtqueues; | 1232 | * device_config() to get the end of the device's current virtqueues; |
| 1066 | * we check that we haven't added any config or feature information | 1233 | * we check that we haven't added any config or feature information |
| 1067 | * yet, otherwise we'd be overwriting them. */ | 1234 | * yet, otherwise we'd be overwriting them. |
| 1235 | */ | ||
| 1068 | assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); | 1236 | assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); |
| 1069 | memcpy(device_config(dev), &vq->config, sizeof(vq->config)); | 1237 | memcpy(device_config(dev), &vq->config, sizeof(vq->config)); |
| 1070 | dev->num_vq++; | 1238 | dev->num_vq++; |
| @@ -1072,14 +1240,18 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
| 1072 | 1240 | ||
| 1073 | verbose("Virtqueue page %#lx\n", to_guest_phys(p)); | 1241 | verbose("Virtqueue page %#lx\n", to_guest_phys(p)); |
| 1074 | 1242 | ||
| 1075 | /* Add to tail of list, so dev->vq is first vq, dev->vq->next is | 1243 | /* |
| 1076 | * second. */ | 1244 | * Add to tail of list, so dev->vq is first vq, dev->vq->next is |
| 1245 | * second. | ||
| 1246 | */ | ||
| 1077 | for (i = &dev->vq; *i; i = &(*i)->next); | 1247 | for (i = &dev->vq; *i; i = &(*i)->next); |
| 1078 | *i = vq; | 1248 | *i = vq; |
| 1079 | } | 1249 | } |
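That single-line for loop is the classic pointer-to-pointer tail append. A minimal standalone illustration (node and list_append are made-up names) shows why it needs no special case for the empty list:

        #include <stdio.h>
        #include <stdlib.h>

        struct node {
                int value;
                struct node *next;
        };

        /* Append at the tail by walking pointer-to-pointer until we find NULL. */
        static void list_append(struct node **head, int value)
        {
                struct node **i;
                struct node *n = malloc(sizeof(*n));

                n->value = value;
                n->next = NULL;
                for (i = head; *i; i = &(*i)->next)
                        ;
                *i = n;   /* works whether *head was NULL or a long list */
        }

        int main(void)
        {
                struct node *list = NULL, *n;

                list_append(&list, 1);
                list_append(&list, 2);
                for (n = list; n; n = n->next)
                        printf("%d\n", n->value);   /* prints 1 then 2: insertion order */
                return 0;
        }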
| 1080 | 1250 | ||
| 1081 | /* The first half of the feature bitmask is for us to advertise features. The | 1251 | /* |
| 1082 | * second half is for the Guest to accept features. */ | 1252 | * The first half of the feature bitmask is for us to advertise features. The |
| 1253 | * second half is for the Guest to accept features. | ||
| 1254 | */ | ||
| 1083 | static void add_feature(struct device *dev, unsigned bit) | 1255 | static void add_feature(struct device *dev, unsigned bit) |
| 1084 | { | 1256 | { |
| 1085 | u8 *features = get_feature_bits(dev); | 1257 | u8 *features = get_feature_bits(dev); |
| @@ -1093,9 +1265,11 @@ static void add_feature(struct device *dev, unsigned bit) | |||
| 1093 | features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); | 1265 | features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); |
| 1094 | } | 1266 | } |
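The byte/bit split in add_feature is easier to see in a toy program. This sketch (FEATURE_BYTES and the helper names are invented; the real sizes come from the descriptor) sets a bit in the first half of the bitmap and shows where the Guest's acknowledgement half would be tested:

        #include <limits.h>
        #include <stdio.h>

        #define FEATURE_BYTES 4   /* illustrative size of one half of the bitmap */

        static unsigned char features[FEATURE_BYTES * 2]; /* [host half][guest half] */

        static void set_feature(unsigned bit)
        {
                features[bit / CHAR_BIT] |= 1u << (bit % CHAR_BIT);
        }

        static int guest_acked(unsigned bit)
        {
                /* The Guest's acknowledgements live in the second half. */
                return features[FEATURE_BYTES + bit / CHAR_BIT]
                        & (1u << (bit % CHAR_BIT));
        }

        int main(void)
        {
                set_feature(9);                         /* e.g. some VIRTIO_*_F_* bit */
                printf("byte 1 = %#x\n", features[1]);  /* 0x2: bit 9 is bit 1 of byte 1 */
                printf("acked? %d\n", guest_acked(9));  /* 0: the Guest half is untouched */
                return 0;
        }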
| 1095 | 1267 | ||
| 1096 | /* This routine sets the configuration fields for an existing device's | 1268 | /* |
| 1269 | * This routine sets the configuration fields for an existing device's | ||
| 1097 | * descriptor. It only works for the last device, but that's OK because that's | 1270 | * descriptor. It only works for the last device, but that's OK because that's |
| 1098 | * how we use it. */ | 1271 | * how we use it. |
| 1272 | */ | ||
| 1099 | static void set_config(struct device *dev, unsigned len, const void *conf) | 1273 | static void set_config(struct device *dev, unsigned len, const void *conf) |
| 1100 | { | 1274 | { |
| 1101 | /* Check we haven't overflowed our single page. */ | 1275 | /* Check we haven't overflowed our single page. */ |
| @@ -1105,12 +1279,18 @@ static void set_config(struct device *dev, unsigned len, const void *conf) | |||
| 1105 | /* Copy in the config information, and store the length. */ | 1279 | /* Copy in the config information, and store the length. */ |
| 1106 | memcpy(device_config(dev), conf, len); | 1280 | memcpy(device_config(dev), conf, len); |
| 1107 | dev->desc->config_len = len; | 1281 | dev->desc->config_len = len; |
| 1282 | |||
| 1283 | /* Size must fit in config_len field (8 bits)! */ | ||
| 1284 | assert(dev->desc->config_len == len); | ||
| 1108 | } | 1285 | } |
| 1109 | 1286 | ||
| 1110 | /* This routine does all the creation and setup of a new device, including | 1287 | /* |
| 1111 | * calling new_dev_desc() to allocate the descriptor and device memory. | 1288 | * This routine does all the creation and setup of a new device, including |
| 1289 | * calling new_dev_desc() to allocate the descriptor and device memory. We | ||
| 1290 | * don't actually start the service threads until later. | ||
| 1112 | * | 1291 | * |
| 1113 | * See what I mean about userspace being boring? */ | 1292 | * See what I mean about userspace being boring? |
| 1293 | */ | ||
| 1114 | static struct device *new_device(const char *name, u16 type) | 1294 | static struct device *new_device(const char *name, u16 type) |
| 1115 | { | 1295 | { |
| 1116 | struct device *dev = malloc(sizeof(*dev)); | 1296 | struct device *dev = malloc(sizeof(*dev)); |
| @@ -1123,10 +1303,12 @@ static struct device *new_device(const char *name, u16 type) | |||
| 1123 | dev->num_vq = 0; | 1303 | dev->num_vq = 0; |
| 1124 | dev->running = false; | 1304 | dev->running = false; |
| 1125 | 1305 | ||
| 1126 | /* Append to device list. Prepending to a single-linked list is | 1306 | /* |
| 1307 | * Append to device list. Prepending to a single-linked list is | ||
| 1127 | * easier, but the user expects the devices to be arranged on the bus | 1308 | * easier, but the user expects the devices to be arranged on the bus |
| 1128 | * in command-line order. The first network device on the command line | 1309 | * in command-line order. The first network device on the command line |
| 1129 | * is eth0, the first block device /dev/vda, etc. */ | 1310 | * is eth0, the first block device /dev/vda, etc. |
| 1311 | */ | ||
| 1130 | if (devices.lastdev) | 1312 | if (devices.lastdev) |
| 1131 | devices.lastdev->next = dev; | 1313 | devices.lastdev->next = dev; |
| 1132 | else | 1314 | else |
| @@ -1136,8 +1318,10 @@ static struct device *new_device(const char *name, u16 type) | |||
| 1136 | return dev; | 1318 | return dev; |
| 1137 | } | 1319 | } |
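Keeping a lastdev pointer makes this append O(1), unlike the pointer-walk used for virtqueues, while still preserving command-line order. A small standalone sketch of the same bookkeeping (struct dev and append_dev are invented names):

        #include <stdio.h>
        #include <stdlib.h>

        struct dev {
                const char *name;
                struct dev *next;
        };

        /* Illustrative stand-in for the Launcher's device-list head and tail. */
        static struct dev *first, *last;

        static void append_dev(const char *name)
        {
                struct dev *d = malloc(sizeof(*d));

                d->name = name;
                d->next = NULL;
                /* Tail pointer makes the append O(1); the list stays in
                 * command-line order, so the first net device becomes eth0. */
                if (last)
                        last->next = d;
                else
                        first = d;
                last = d;
        }

        int main(void)
        {
                append_dev("net");
                append_dev("block");
                for (struct dev *d = first; d; d = d->next)
                        printf("%s\n", d->name);   /* net, then block */
                return 0;
        }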
| 1138 | 1320 | ||
| 1139 | /* Our first setup routine is the console. It's a fairly simple device, but | 1321 | /* |
| 1140 | * UNIX tty handling makes it uglier than it could be. */ | 1322 | * Our first setup routine is the console. It's a fairly simple device, but |
| 1323 | * UNIX tty handling makes it uglier than it could be. | ||
| 1324 | */ | ||
| 1141 | static void setup_console(void) | 1325 | static void setup_console(void) |
| 1142 | { | 1326 | { |
| 1143 | struct device *dev; | 1327 | struct device *dev; |
| @@ -1145,8 +1329,10 @@ static void setup_console(void) | |||
| 1145 | /* If we can save the initial standard input settings... */ | 1329 | /* If we can save the initial standard input settings... */ |
| 1146 | if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { | 1330 | if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { |
| 1147 | struct termios term = orig_term; | 1331 | struct termios term = orig_term; |
| 1148 | /* Then we turn off echo, line buffering and ^C etc. We want a | 1332 | /* |
| 1149 | * raw input stream to the Guest. */ | 1333 | * Then we turn off echo, line buffering and ^C etc: We want a |
| 1334 | * raw input stream to the Guest. | ||
| 1335 | */ | ||
| 1150 | term.c_lflag &= ~(ISIG|ICANON|ECHO); | 1336 | term.c_lflag &= ~(ISIG|ICANON|ECHO); |
| 1151 | tcsetattr(STDIN_FILENO, TCSANOW, &term); | 1337 | tcsetattr(STDIN_FILENO, TCSANOW, &term); |
| 1152 | } | 1338 | } |
| @@ -1157,10 +1343,12 @@ static void setup_console(void) | |||
| 1157 | dev->priv = malloc(sizeof(struct console_abort)); | 1343 | dev->priv = malloc(sizeof(struct console_abort)); |
| 1158 | ((struct console_abort *)dev->priv)->count = 0; | 1344 | ((struct console_abort *)dev->priv)->count = 0; |
| 1159 | 1345 | ||
| 1160 | /* The console needs two virtqueues: the input then the output. When | 1346 | /* |
| 1347 | * The console needs two virtqueues: the input then the output. When | ||
| 1161 | * they put something in the input queue, we make sure we're listening to | 1348 | * stdin. When they put something in the output queue, we write it to |
| 1162 | * stdin. When they put something in the output queue, we write it to | 1349 | * stdin. When they put something in the output queue, we write it to |
| 1163 | * stdout. */ | 1350 | * stdout. |
| 1351 | */ | ||
| 1164 | add_virtqueue(dev, VIRTQUEUE_NUM, console_input); | 1352 | add_virtqueue(dev, VIRTQUEUE_NUM, console_input); |
| 1165 | add_virtqueue(dev, VIRTQUEUE_NUM, console_output); | 1353 | add_virtqueue(dev, VIRTQUEUE_NUM, console_output); |
| 1166 | 1354 | ||
| @@ -1168,7 +1356,8 @@ static void setup_console(void) | |||
| 1168 | } | 1356 | } |
| 1169 | /*:*/ | 1357 | /*:*/ |
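For reference, here is a self-contained sketch of the same termios dance: save the settings, go raw, and restore on exit. (The atexit() cleanup is one reasonable choice for a demo, not necessarily how the Launcher restores the tty.)

        #include <stdio.h>
        #include <stdlib.h>
        #include <termios.h>
        #include <unistd.h>

        static struct termios saved;

        static void restore_tty(void)
        {
                tcsetattr(STDIN_FILENO, TCSANOW, &saved);
        }

        int main(void)
        {
                struct termios raw;

                if (tcgetattr(STDIN_FILENO, &saved) != 0)
                        return 1;               /* not a tty: nothing to do */
                atexit(restore_tty);            /* put things back however we exit */

                raw = saved;
                raw.c_lflag &= ~(ISIG | ICANON | ECHO); /* no ^C, no line buffering, no echo */
                raw.c_cc[VMIN] = 1;             /* read() returns after one byte */
                raw.c_cc[VTIME] = 0;
                tcsetattr(STDIN_FILENO, TCSANOW, &raw);

                printf("terminal is raw; press any key\n");
                (void)getchar();                /* returns on the first byte, no Enter needed */
                return 0;
        }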
| 1170 | 1358 | ||
| 1171 | /*M:010 Inter-guest networking is an interesting area. Simplest is to have a | 1359 | /*M:010 |
| 1360 | * Inter-guest networking is an interesting area. Simplest is to have a | ||
| 1172 | * --sharenet=<name> option which opens or creates a named pipe. This can be | 1361 | * --sharenet=<name> option which opens or creates a named pipe. This can be |
| 1173 | * used to send packets to another guest in a 1:1 manner. | 1362 | * used to send packets to another guest in a 1:1 manner. |
| 1174 | * | 1363 | * |
| @@ -1182,7 +1371,8 @@ static void setup_console(void) | |||
| 1182 | * multiple inter-guest channels behind one interface, although it would | 1371 | * multiple inter-guest channels behind one interface, although it would |
| 1183 | * require some manner of hotplugging new virtio channels. | 1372 | * require some manner of hotplugging new virtio channels. |
| 1184 | * | 1373 | * |
| 1185 | * Finally, we could implement a virtio network switch in the kernel. :*/ | 1374 | * Finally, we could implement a virtio network switch in the kernel. |
| 1375 | :*/ | ||
| 1186 | 1376 | ||
| 1187 | static u32 str2ip(const char *ipaddr) | 1377 | static u32 str2ip(const char *ipaddr) |
| 1188 | { | 1378 | { |
| @@ -1207,11 +1397,13 @@ static void str2mac(const char *macaddr, unsigned char mac[6]) | |||
| 1207 | mac[5] = m[5]; | 1397 | mac[5] = m[5]; |
| 1208 | } | 1398 | } |
| 1209 | 1399 | ||
| 1210 | /* This code is "adapted" from libbridge: it attaches the Host end of the | 1400 | /* |
| 1401 | * This code is "adapted" from libbridge: it attaches the Host end of the | ||
| 1211 | * network device to the bridge device specified by the command line. | 1402 | * network device to the bridge device specified by the command line. |
| 1212 | * | 1403 | * |
| 1213 | * This is yet another James Morris contribution (I'm an IP-level guy, so I | 1404 | * This is yet another James Morris contribution (I'm an IP-level guy, so I |
| 1214 | * dislike bridging), and I just try not to break it. */ | 1405 | * dislike bridging), and I just try not to break it. |
| 1406 | */ | ||
| 1215 | static void add_to_bridge(int fd, const char *if_name, const char *br_name) | 1407 | static void add_to_bridge(int fd, const char *if_name, const char *br_name) |
| 1216 | { | 1408 | { |
| 1217 | int ifidx; | 1409 | int ifidx; |
| @@ -1231,9 +1423,11 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name) | |||
| 1231 | err(1, "can't add %s to bridge %s", if_name, br_name); | 1423 | err(1, "can't add %s to bridge %s", if_name, br_name); |
| 1232 | } | 1424 | } |
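A reduced sketch of the libbridge-style attach looks like this (it assumes SIOCBRADDIF from <linux/sockios.h>, takes an already-open AF_INET socket, and needs CAP_NET_ADMIN to actually succeed):

        #include <err.h>
        #include <net/if.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <linux/sockios.h>

        /* Attach interface 'if_name' to bridge 'br_name' using socket 'fd'. */
        void bridge_add_if(int fd, const char *if_name, const char *br_name)
        {
                struct ifreq ifr;
                int ifidx = if_nametoindex(if_name);

                if (!ifidx)
                        errx(1, "interface %s does not exist", if_name);

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, br_name, IFNAMSIZ - 1);
                ifr.ifr_ifindex = ifidx;
                if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
                        err(1, "can't add %s to bridge %s", if_name, br_name);
        }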
| 1233 | 1425 | ||
| 1234 | /* This sets up the Host end of the network device with an IP address, brings | 1426 | /* |
| 1427 | * This sets up the Host end of the network device with an IP address, brings | ||
| 1235 | * it up so packets will flow, then copies the MAC address into the hwaddr | 1428 | * it up so packets will flow, then copies the MAC address into the hwaddr |
| 1236 | * pointer. */ | 1429 | * pointer. |
| 1430 | */ | ||
| 1237 | static void configure_device(int fd, const char *tapif, u32 ipaddr) | 1431 | static void configure_device(int fd, const char *tapif, u32 ipaddr) |
| 1238 | { | 1432 | { |
| 1239 | struct ifreq ifr; | 1433 | struct ifreq ifr; |
| @@ -1260,10 +1454,12 @@ static int get_tun_device(char tapif[IFNAMSIZ]) | |||
| 1260 | /* Start with this zeroed. Messy but sure. */ | 1454 | /* Start with this zeroed. Messy but sure. */ |
| 1261 | memset(&ifr, 0, sizeof(ifr)); | 1455 | memset(&ifr, 0, sizeof(ifr)); |
| 1262 | 1456 | ||
| 1263 | /* We open the /dev/net/tun device and tell it we want a tap device. A | 1457 | /* |
| 1458 | * We open the /dev/net/tun device and tell it we want a tap device. A | ||
| 1264 | * tap device is like a tun device, only somehow different. To tell | 1459 | * tap device is like a tun device, only somehow different. To tell |
| 1265 | * the truth, I completely blundered my way through this code, but it | 1460 | * the truth, I completely blundered my way through this code, but it |
| 1266 | * works now! */ | 1461 | * works now! |
| 1462 | */ | ||
| 1267 | netfd = open_or_die("/dev/net/tun", O_RDWR); | 1463 | netfd = open_or_die("/dev/net/tun", O_RDWR); |
| 1268 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR; | 1464 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR; |
| 1269 | strcpy(ifr.ifr_name, "tap%d"); | 1465 | strcpy(ifr.ifr_name, "tap%d"); |
| @@ -1274,18 +1470,22 @@ static int get_tun_device(char tapif[IFNAMSIZ]) | |||
| 1274 | TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0) | 1470 | TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0) |
| 1275 | err(1, "Could not set features for tun device"); | 1471 | err(1, "Could not set features for tun device"); |
| 1276 | 1472 | ||
| 1277 | /* We don't need checksums calculated for packets coming in this | 1473 | /* |
| 1278 | * device: trust us! */ | 1474 | * We don't need checksums calculated for packets coming in this |
| 1475 | * device: trust us! | ||
| 1476 | */ | ||
| 1279 | ioctl(netfd, TUNSETNOCSUM, 1); | 1477 | ioctl(netfd, TUNSETNOCSUM, 1); |
| 1280 | 1478 | ||
| 1281 | memcpy(tapif, ifr.ifr_name, IFNAMSIZ); | 1479 | memcpy(tapif, ifr.ifr_name, IFNAMSIZ); |
| 1282 | return netfd; | 1480 | return netfd; |
| 1283 | } | 1481 | } |
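The tun/tap handshake itself is short. Here is a hedged, standalone sketch (it omits IFF_VNET_HDR and the offload ioctls the Launcher also issues, and needs root to run):

        #include <err.h>
        #include <fcntl.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <net/if.h>
        #include <linux/if_tun.h>

        /* Open /dev/net/tun and turn the fd into a tap interface named tapN. */
        int open_tap(char name_out[IFNAMSIZ])
        {
                struct ifreq ifr;
                int fd = open("/dev/net/tun", O_RDWR);

                if (fd < 0)
                        err(1, "opening /dev/net/tun");

                memset(&ifr, 0, sizeof(ifr));
                ifr.ifr_flags = IFF_TAP | IFF_NO_PI;    /* ethernet frames, no extra header */
                strcpy(ifr.ifr_name, "tap%d");          /* kernel fills in the real name */
                if (ioctl(fd, TUNSETIFF, &ifr) != 0)
                        err(1, "configuring tap device");

                memcpy(name_out, ifr.ifr_name, IFNAMSIZ);
                return fd;
        }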
| 1284 | 1482 | ||
| 1285 | /*L:195 Our network is a Host<->Guest network. This can either use bridging or | 1483 | /*L:195 |
| 1484 | * Our network is a Host<->Guest network. This can either use bridging or | ||
| 1286 | * routing, but the principle is the same: it uses the "tun" device to inject | 1485 | * routing, but the principle is the same: it uses the "tun" device to inject |
| 1287 | * packets into the Host as if they came in from a normal network card. We | 1486 | * packets into the Host as if they came in from a normal network card. We |
| 1288 | * just shunt packets between the Guest and the tun device. */ | 1487 | * just shunt packets between the Guest and the tun device. |
| 1488 | */ | ||
| 1289 | static void setup_tun_net(char *arg) | 1489 | static void setup_tun_net(char *arg) |
| 1290 | { | 1490 | { |
| 1291 | struct device *dev; | 1491 | struct device *dev; |
| @@ -1302,13 +1502,14 @@ static void setup_tun_net(char *arg) | |||
| 1302 | dev = new_device("net", VIRTIO_ID_NET); | 1502 | dev = new_device("net", VIRTIO_ID_NET); |
| 1303 | dev->priv = net_info; | 1503 | dev->priv = net_info; |
| 1304 | 1504 | ||
| 1305 | /* Network devices need a receive and a send queue, just like | 1505 | /* Network devices need a recv and a send queue, just like console. */ |
| 1306 | * console. */ | ||
| 1307 | add_virtqueue(dev, VIRTQUEUE_NUM, net_input); | 1506 | add_virtqueue(dev, VIRTQUEUE_NUM, net_input); |
| 1308 | add_virtqueue(dev, VIRTQUEUE_NUM, net_output); | 1507 | add_virtqueue(dev, VIRTQUEUE_NUM, net_output); |
| 1309 | 1508 | ||
| 1310 | /* We need a socket to perform the magic network ioctls to bring up the | 1509 | /* |
| 1311 | * tap interface, connect to the bridge etc. Any socket will do! */ | 1510 | * We need a socket to perform the magic network ioctls to bring up the |
| 1511 | * tap interface, connect to the bridge etc. Any socket will do! | ||
| 1512 | */ | ||
| 1312 | ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); | 1513 | ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); |
| 1313 | if (ipfd < 0) | 1514 | if (ipfd < 0) |
| 1314 | err(1, "opening IP socket"); | 1515 | err(1, "opening IP socket"); |
| @@ -1362,39 +1563,31 @@ static void setup_tun_net(char *arg) | |||
| 1362 | verbose("device %u: tun %s: %s\n", | 1563 | verbose("device %u: tun %s: %s\n", |
| 1363 | devices.device_num, tapif, arg); | 1564 | devices.device_num, tapif, arg); |
| 1364 | } | 1565 | } |
| 1365 | 1566 | /*:*/ | |
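Those "magic network ioctls" boil down to SIOCSIFADDR and SIOCSIFFLAGS on any old AF_INET socket. A reduced sketch (set_ip_and_up is an invented name; the Launcher's own helper differs in details):

        #include <arpa/inet.h>
        #include <err.h>
        #include <net/if.h>
        #include <netinet/in.h>
        #include <stdint.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>

        /* Give 'ifname' an IPv4 address (host byte order) and bring it up. */
        void set_ip_and_up(int sockfd, const char *ifname, uint32_t ipaddr)
        {
                struct ifreq ifr;
                struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = htonl(ipaddr);
                if (ioctl(sockfd, SIOCSIFADDR, &ifr) != 0)
                        err(1, "setting %s address", ifname);

                /* A freshly created tap device has no flags set, so just turn it on. */
                ifr.ifr_flags = IFF_UP;
                if (ioctl(sockfd, SIOCSIFFLAGS, &ifr) != 0)
                        err(1, "bringing up %s", ifname);
        }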
| 1366 | /* Our block (disk) device should be really simple: the Guest asks for a block | ||
| 1367 | * number and we read or write that position in the file. Unfortunately, that | ||
| 1368 | * was amazingly slow: the Guest waits until the read is finished before | ||
| 1369 | * running anything else, even if it could have been doing useful work. | ||
| 1370 | * | ||
| 1371 | * We could use async I/O, except it's reputed to suck so hard that characters | ||
| 1372 | * actually go missing from your code when you try to use it. | ||
| 1373 | * | ||
| 1374 | * So we farm the I/O out to thread, and communicate with it via a pipe. */ | ||
| 1375 | 1567 | ||
| 1376 | /* This hangs off device->priv. */ | 1568 | /* This hangs off device->priv. */ |
| 1377 | struct vblk_info | 1569 | struct vblk_info { |
| 1378 | { | ||
| 1379 | /* The size of the file. */ | 1570 | /* The size of the file. */ |
| 1380 | off64_t len; | 1571 | off64_t len; |
| 1381 | 1572 | ||
| 1382 | /* The file descriptor for the file. */ | 1573 | /* The file descriptor for the file. */ |
| 1383 | int fd; | 1574 | int fd; |
| 1384 | 1575 | ||
| 1385 | /* IO thread listens on this file descriptor [0]. */ | ||
| 1386 | int workpipe[2]; | ||
| 1387 | |||
| 1388 | /* IO thread writes to this file descriptor to mark it done, then | ||
| 1389 | * Launcher triggers interrupt to Guest. */ | ||
| 1390 | int done_fd; | ||
| 1391 | }; | 1576 | }; |
| 1392 | 1577 | ||
| 1393 | /*L:210 | 1578 | /*L:210 |
| 1394 | * The Disk | 1579 | * The Disk |
| 1395 | * | 1580 | * |
| 1396 | * Remember that the block device is handled by a separate I/O thread. We head | 1581 | * The disk only has one virtqueue, so it only has one thread. It is really |
| 1397 | * straight into the core of that thread here: | 1582 | * simple: the Guest asks for a block number and we read or write that position |
| 1583 | * in the file. | ||
| 1584 | * | ||
| 1585 | * Before we serviced each virtqueue in a separate thread, that was unacceptably | ||
| 1586 | * slow: the Guest waits until the read is finished before running anything | ||
| 1587 | * else, even if it could have been doing useful work. | ||
| 1588 | * | ||
| 1589 | * We could have used async I/O, except it's reputed to suck so hard that | ||
| 1590 | * characters actually go missing from your code when you try to use it. | ||
| 1398 | */ | 1591 | */ |
| 1399 | static void blk_request(struct virtqueue *vq) | 1592 | static void blk_request(struct virtqueue *vq) |
| 1400 | { | 1593 | { |
| @@ -1406,47 +1599,64 @@ static void blk_request(struct virtqueue *vq) | |||
| 1406 | struct iovec iov[vq->vring.num]; | 1599 | struct iovec iov[vq->vring.num]; |
| 1407 | off64_t off; | 1600 | off64_t off; |
| 1408 | 1601 | ||
| 1409 | /* Get the next request. */ | 1602 | /* |
| 1603 | * Get the next request, where we normally wait. It triggers the | ||
| 1604 | * interrupt to acknowledge previously serviced requests (if any). | ||
| 1605 | */ | ||
| 1410 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); | 1606 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); |
| 1411 | 1607 | ||
| 1412 | /* Every block request should contain at least one output buffer | 1608 | /* |
| 1609 | * Every block request should contain at least one output buffer | ||
| 1413 | * (detailing the location on disk and the type of request) and one | 1610 | * (detailing the location on disk and the type of request) and one |
| 1414 | * input buffer (to hold the result). */ | 1611 | * input buffer (to hold the result). |
| 1612 | */ | ||
| 1415 | if (out_num == 0 || in_num == 0) | 1613 | if (out_num == 0 || in_num == 0) |
| 1416 | errx(1, "Bad virtblk cmd %u out=%u in=%u", | 1614 | errx(1, "Bad virtblk cmd %u out=%u in=%u", |
| 1417 | head, out_num, in_num); | 1615 | head, out_num, in_num); |
| 1418 | 1616 | ||
| 1419 | out = convert(&iov[0], struct virtio_blk_outhdr); | 1617 | out = convert(&iov[0], struct virtio_blk_outhdr); |
| 1420 | in = convert(&iov[out_num+in_num-1], u8); | 1618 | in = convert(&iov[out_num+in_num-1], u8); |
| 1619 | /* | ||
| 1620 | * For historical reasons, block operations are expressed in 512 byte | ||
| 1621 | * "sectors". | ||
| 1622 | */ | ||
| 1421 | off = out->sector * 512; | 1623 | off = out->sector * 512; |
| 1422 | 1624 | ||
| 1423 | /* The block device implements "barriers", where the Guest indicates | 1625 | /* |
| 1626 | * The block device implements "barriers", where the Guest indicates | ||
| 1424 | * that it wants all previous writes to occur before this write. We | 1627 | * that it wants all previous writes to occur before this write. We |
| 1425 | * don't have a way of asking our kernel to do a barrier, so we just | 1628 | * don't have a way of asking our kernel to do a barrier, so we just |
| 1426 | * synchronize all the data in the file. Pretty poor, no? */ | 1629 | * synchronize all the data in the file. Pretty poor, no? |
| 1630 | */ | ||
| 1427 | if (out->type & VIRTIO_BLK_T_BARRIER) | 1631 | if (out->type & VIRTIO_BLK_T_BARRIER) |
| 1428 | fdatasync(vblk->fd); | 1632 | fdatasync(vblk->fd); |
| 1429 | 1633 | ||
| 1430 | /* In general the virtio block driver is allowed to try SCSI commands. | 1634 | /* |
| 1431 | * It'd be nice if we supported eject, for example, but we don't. */ | 1635 | * In general the virtio block driver is allowed to try SCSI commands. |
| 1636 | * It'd be nice if we supported eject, for example, but we don't. | ||
| 1637 | */ | ||
| 1432 | if (out->type & VIRTIO_BLK_T_SCSI_CMD) { | 1638 | if (out->type & VIRTIO_BLK_T_SCSI_CMD) { |
| 1433 | fprintf(stderr, "Scsi commands unsupported\n"); | 1639 | fprintf(stderr, "Scsi commands unsupported\n"); |
| 1434 | *in = VIRTIO_BLK_S_UNSUPP; | 1640 | *in = VIRTIO_BLK_S_UNSUPP; |
| 1435 | wlen = sizeof(*in); | 1641 | wlen = sizeof(*in); |
| 1436 | } else if (out->type & VIRTIO_BLK_T_OUT) { | 1642 | } else if (out->type & VIRTIO_BLK_T_OUT) { |
| 1437 | /* Write */ | 1643 | /* |
| 1438 | 1644 | * Write | |
| 1439 | /* Move to the right location in the block file. This can fail | 1645 | * |
| 1440 | * if they try to write past end. */ | 1646 | * Move to the right location in the block file. This can fail |
| 1647 | * if they try to write past end. | ||
| 1648 | */ | ||
| 1441 | if (lseek64(vblk->fd, off, SEEK_SET) != off) | 1649 | if (lseek64(vblk->fd, off, SEEK_SET) != off) |
| 1442 | err(1, "Bad seek to sector %llu", out->sector); | 1650 | err(1, "Bad seek to sector %llu", out->sector); |
| 1443 | 1651 | ||
| 1444 | ret = writev(vblk->fd, iov+1, out_num-1); | 1652 | ret = writev(vblk->fd, iov+1, out_num-1); |
| 1445 | verbose("WRITE to sector %llu: %i\n", out->sector, ret); | 1653 | verbose("WRITE to sector %llu: %i\n", out->sector, ret); |
| 1446 | 1654 | ||
| 1447 | /* Grr... Now we know how long the descriptor they sent was, we | 1655 | /* |
| 1656 | * Grr... Now we know how long the descriptor they sent was, we | ||
| 1448 | * make sure they didn't try to write over the end of the block | 1657 | * make sure they didn't try to write over the end of the block |
| 1449 | * file (possibly extending it). */ | 1658 | * file (possibly extending it). |
| 1659 | */ | ||
| 1450 | if (ret > 0 && off + ret > vblk->len) { | 1660 | if (ret > 0 && off + ret > vblk->len) { |
| 1451 | /* Trim it back to the correct length */ | 1661 | /* Trim it back to the correct length */ |
| 1452 | ftruncate64(vblk->fd, vblk->len); | 1662 | ftruncate64(vblk->fd, vblk->len); |
| @@ -1456,10 +1666,12 @@ static void blk_request(struct virtqueue *vq) | |||
| 1456 | wlen = sizeof(*in); | 1666 | wlen = sizeof(*in); |
| 1457 | *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); | 1667 | *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); |
| 1458 | } else { | 1668 | } else { |
| 1459 | /* Read */ | 1669 | /* |
| 1460 | 1670 | * Read | |
| 1461 | /* Move to the right location in the block file. This can fail | 1671 | * |
| 1462 | * if they try to read past end. */ | 1672 | * Move to the right location in the block file. This can fail |
| 1673 | * if they try to read past end. | ||
| 1674 | */ | ||
| 1463 | if (lseek64(vblk->fd, off, SEEK_SET) != off) | 1675 | if (lseek64(vblk->fd, off, SEEK_SET) != off) |
| 1464 | err(1, "Bad seek to sector %llu", out->sector); | 1676 | err(1, "Bad seek to sector %llu", out->sector); |
| 1465 | 1677 | ||
| @@ -1474,13 +1686,16 @@ static void blk_request(struct virtqueue *vq) | |||
| 1474 | } | 1686 | } |
| 1475 | } | 1687 | } |
| 1476 | 1688 | ||
| 1477 | /* OK, so we noted that it was pretty poor to use an fdatasync as a | 1689 | /* |
| 1690 | * OK, so we noted that it was pretty poor to use an fdatasync as a | ||
| 1478 | * barrier. But Christoph Hellwig points out that we need a sync | 1691 | * barrier. But Christoph Hellwig points out that we need a sync |
| 1479 | * *afterwards* as well: "Barriers specify no reordering to the front | 1692 | * *afterwards* as well: "Barriers specify no reordering to the front |
| 1480 | * or the back." And Jens Axboe confirmed it, so here we are: */ | 1693 | * or the back." And Jens Axboe confirmed it, so here we are: |
| 1694 | */ | ||
| 1481 | if (out->type & VIRTIO_BLK_T_BARRIER) | 1695 | if (out->type & VIRTIO_BLK_T_BARRIER) |
| 1482 | fdatasync(vblk->fd); | 1696 | fdatasync(vblk->fd); |
| 1483 | 1697 | ||
| 1698 | /* Finished that request. */ | ||
| 1484 | add_used(vq, head, wlen); | 1699 | add_used(vq, head, wlen); |
| 1485 | } | 1700 | } |
| 1486 | 1701 | ||
| @@ -1491,7 +1706,7 @@ static void setup_block_file(const char *filename) | |||
| 1491 | struct vblk_info *vblk; | 1706 | struct vblk_info *vblk; |
| 1492 | struct virtio_blk_config conf; | 1707 | struct virtio_blk_config conf; |
| 1493 | 1708 | ||
| 1494 | /* The device responds to return from I/O thread. */ | 1709 | /* Create the device. */ |
| 1495 | dev = new_device("block", VIRTIO_ID_BLOCK); | 1710 | dev = new_device("block", VIRTIO_ID_BLOCK); |
| 1496 | 1711 | ||
| 1497 | /* The device has one virtqueue, where the Guest places requests. */ | 1712 | /* The device has one virtqueue, where the Guest places requests. */ |
| @@ -1510,27 +1725,32 @@ static void setup_block_file(const char *filename) | |||
| 1510 | /* Tell Guest how many sectors this device has. */ | 1725 | /* Tell Guest how many sectors this device has. */ |
| 1511 | conf.capacity = cpu_to_le64(vblk->len / 512); | 1726 | conf.capacity = cpu_to_le64(vblk->len / 512); |
| 1512 | 1727 | ||
| 1513 | /* Tell Guest not to put in too many descriptors at once: two are used | 1728 | /* |
| 1514 | * for the in and out elements. */ | 1729 | * Tell Guest not to put in too many descriptors at once: two are used |
| 1730 | * for the in and out elements. | ||
| 1731 | */ | ||
| 1515 | add_feature(dev, VIRTIO_BLK_F_SEG_MAX); | 1732 | add_feature(dev, VIRTIO_BLK_F_SEG_MAX); |
| 1516 | conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); | 1733 | conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); |
| 1517 | 1734 | ||
| 1518 | set_config(dev, sizeof(conf), &conf); | 1735 | /* Don't try to put in the whole struct: we have an 8-bit limit. */ |
| 1736 | set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf); | ||
| 1519 | 1737 | ||
| 1520 | verbose("device %u: virtblock %llu sectors\n", | 1738 | verbose("device %u: virtblock %llu sectors\n", |
| 1521 | ++devices.device_num, le64_to_cpu(conf.capacity)); | 1739 | ++devices.device_num, le64_to_cpu(conf.capacity)); |
| 1522 | } | 1740 | } |
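The offsetof() trick is easy to check in isolation. This sketch declares the first few virtio_blk_config fields locally, so it stands alone (the real layout lives in <linux/virtio_blk.h>), and confirms the trimmed length fits the 8-bit config_len:

        #include <assert.h>
        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        /* First few fields of struct virtio_blk_config, declared locally. */
        struct blk_config {
                uint64_t capacity;  /* in 512-byte sectors */
                uint32_t size_max;
                uint32_t seg_max;
                struct { uint16_t cylinders; uint8_t heads, sectors; } geometry;
                uint32_t blk_size;
        } __attribute__((packed));

        int main(void)
        {
                size_t len = offsetof(struct blk_config, geometry);

                /* Everything before geometry: capacity + size_max + seg_max. */
                printf("config length = %zu bytes\n", len);   /* 16 */
                assert(len <= UINT8_MAX);                     /* fits config_len */
                return 0;
        }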
| 1523 | 1741 | ||
| 1524 | struct rng_info { | 1742 | /*L:211 |
| 1525 | int rfd; | 1743 | * Our random number generator device reads from /dev/random into the Guest's |
| 1526 | }; | ||
| 1527 | |||
| 1528 | /* Our random number generator device reads from /dev/random into the Guest's | ||
| 1529 | * input buffers. The usual case is that the Guest doesn't want random numbers | 1744 | * input buffers. The usual case is that the Guest doesn't want random numbers |
| 1530 | * and so has no buffers although /dev/random is still readable, whereas | 1745 | * and so has no buffers although /dev/random is still readable, whereas |
| 1531 | * console is the reverse. | 1746 | * console is the reverse. |
| 1532 | * | 1747 | * |
| 1533 | * The same logic applies, however. */ | 1748 | * The same logic applies, however. |
| 1749 | */ | ||
| 1750 | struct rng_info { | ||
| 1751 | int rfd; | ||
| 1752 | }; | ||
| 1753 | |||
| 1534 | static void rng_input(struct virtqueue *vq) | 1754 | static void rng_input(struct virtqueue *vq) |
| 1535 | { | 1755 | { |
| 1536 | int len; | 1756 | int len; |
| @@ -1543,9 +1763,10 @@ static void rng_input(struct virtqueue *vq) | |||
| 1543 | if (out_num) | 1763 | if (out_num) |
| 1544 | errx(1, "Output buffers in rng?"); | 1764 | errx(1, "Output buffers in rng?"); |
| 1545 | 1765 | ||
| 1546 | /* This is why we convert to iovecs: the readv() call uses them, and so | 1766 | /* |
| 1547 | * it reads straight into the Guest's buffer. We loop to make sure we | 1767 | * Just like the console write, we loop to cover the whole iovec. |
| 1548 | * fill it. */ | 1768 | * In this case, short reads actually happen quite a bit. |
| 1769 | */ | ||
| 1549 | while (!iov_empty(iov, in_num)) { | 1770 | while (!iov_empty(iov, in_num)) { |
| 1550 | len = readv(rng_info->rfd, iov, in_num); | 1771 | len = readv(rng_info->rfd, iov, in_num); |
| 1551 | if (len <= 0) | 1772 | if (len <= 0) |
| @@ -1558,15 +1779,18 @@ static void rng_input(struct virtqueue *vq) | |||
| 1558 | add_used(vq, head, totlen); | 1779 | add_used(vq, head, totlen); |
| 1559 | } | 1780 | } |
| 1560 | 1781 | ||
| 1561 | /* And this creates a "hardware" random number device for the Guest. */ | 1782 | /*L:199 |
| 1783 | * This creates a "hardware" random number device for the Guest. | ||
| 1784 | */ | ||
| 1562 | static void setup_rng(void) | 1785 | static void setup_rng(void) |
| 1563 | { | 1786 | { |
| 1564 | struct device *dev; | 1787 | struct device *dev; |
| 1565 | struct rng_info *rng_info = malloc(sizeof(*rng_info)); | 1788 | struct rng_info *rng_info = malloc(sizeof(*rng_info)); |
| 1566 | 1789 | ||
| 1790 | /* Our device's private info simply contains the /dev/random fd. */ | ||
| 1567 | rng_info->rfd = open_or_die("/dev/random", O_RDONLY); | 1791 | rng_info->rfd = open_or_die("/dev/random", O_RDONLY); |
| 1568 | 1792 | ||
| 1569 | /* The device responds to return from I/O thread. */ | 1793 | /* Create the new device. */ |
| 1570 | dev = new_device("rng", VIRTIO_ID_RNG); | 1794 | dev = new_device("rng", VIRTIO_ID_RNG); |
| 1571 | dev->priv = rng_info; | 1795 | dev->priv = rng_info; |
| 1572 | 1796 | ||
| @@ -1582,8 +1806,10 @@ static void __attribute__((noreturn)) restart_guest(void) | |||
| 1582 | { | 1806 | { |
| 1583 | unsigned int i; | 1807 | unsigned int i; |
| 1584 | 1808 | ||
| 1585 | /* Since we don't track all open fds, we simply close everything beyond | 1809 | /* |
| 1586 | * stderr. */ | 1810 | * Since we don't track all open fds, we simply close everything beyond |
| 1811 | * stderr. | ||
| 1812 | */ | ||
| 1587 | for (i = 3; i < FD_SETSIZE; i++) | 1813 | for (i = 3; i < FD_SETSIZE; i++) |
| 1588 | close(i); | 1814 | close(i); |
| 1589 | 1815 | ||
| @@ -1594,8 +1820,10 @@ static void __attribute__((noreturn)) restart_guest(void) | |||
| 1594 | err(1, "Could not exec %s", main_args[0]); | 1820 | err(1, "Could not exec %s", main_args[0]); |
| 1595 | } | 1821 | } |
| 1596 | 1822 | ||
| 1597 | /*L:220 Finally we reach the core of the Launcher which runs the Guest, serves | 1823 | /*L:220 |
| 1598 | * its input and output, and finally, lays it to rest. */ | 1824 | * Finally we reach the core of the Launcher which runs the Guest, serves |
| 1825 | * its input and output, and finally, lays it to rest. | ||
| 1826 | */ | ||
| 1599 | static void __attribute__((noreturn)) run_guest(void) | 1827 | static void __attribute__((noreturn)) run_guest(void) |
| 1600 | { | 1828 | { |
| 1601 | for (;;) { | 1829 | for (;;) { |
| @@ -1630,7 +1858,7 @@ static void __attribute__((noreturn)) run_guest(void) | |||
| 1630 | * | 1858 | * |
| 1631 | * Are you ready? Take a deep breath and join me in the core of the Host, in | 1859 | * Are you ready? Take a deep breath and join me in the core of the Host, in |
| 1632 | * "make Host". | 1860 | * "make Host". |
| 1633 | :*/ | 1861 | :*/ |
| 1634 | 1862 | ||
| 1635 | static struct option opts[] = { | 1863 | static struct option opts[] = { |
| 1636 | { "verbose", 0, NULL, 'v' }, | 1864 | { "verbose", 0, NULL, 'v' }, |
| @@ -1651,8 +1879,7 @@ static void usage(void) | |||
| 1651 | /*L:105 The main routine is where the real work begins: */ | 1879 | /*L:105 The main routine is where the real work begins: */ |
| 1652 | int main(int argc, char *argv[]) | 1880 | int main(int argc, char *argv[]) |
| 1653 | { | 1881 | { |
| 1654 | /* Memory, top-level pagetable, code start point and size of the | 1882 | /* Memory, code start point and size of the (optional) initrd. */ |
| 1655 | * (optional) initrd. */ | ||
| 1656 | unsigned long mem = 0, start, initrd_size = 0; | 1883 | unsigned long mem = 0, start, initrd_size = 0; |
| 1657 | /* Two temporaries. */ | 1884 | /* Two temporaries. */ |
| 1658 | int i, c; | 1885 | int i, c; |
| @@ -1664,24 +1891,32 @@ int main(int argc, char *argv[]) | |||
| 1664 | /* Save the args: we "reboot" by execing ourselves again. */ | 1891 | /* Save the args: we "reboot" by execing ourselves again. */ |
| 1665 | main_args = argv; | 1892 | main_args = argv; |
| 1666 | 1893 | ||
| 1667 | /* First we initialize the device list. We keep a pointer to the last | 1894 | /* |
| 1895 | * First we initialize the device list. We keep a pointer to the last | ||
| 1668 | * device, and the next interrupt number to use for devices (1: | 1896 | * device, and the next interrupt number to use for devices (1: |
| 1669 | * remember that 0 is used by the timer). */ | 1897 | * remember that 0 is used by the timer). |
| 1898 | */ | ||
| 1670 | devices.lastdev = NULL; | 1899 | devices.lastdev = NULL; |
| 1671 | devices.next_irq = 1; | 1900 | devices.next_irq = 1; |
| 1672 | 1901 | ||
| 1902 | /* We're CPU 0. In fact, that's the only CPU possible right now. */ | ||
| 1673 | cpu_id = 0; | 1903 | cpu_id = 0; |
| 1674 | /* We need to know how much memory so we can set up the device | 1904 | |
| 1905 | /* | ||
| 1906 | * We need to know how much memory so we can set up the device | ||
| 1675 | * descriptor and memory pages for the devices as we parse the command | 1907 | * descriptor and memory pages for the devices as we parse the command |
| 1676 | * line. So we quickly look through the arguments to find the amount | 1908 | * line. So we quickly look through the arguments to find the amount |
| 1677 | * of memory now. */ | 1909 | * of memory now. |
| 1910 | */ | ||
| 1678 | for (i = 1; i < argc; i++) { | 1911 | for (i = 1; i < argc; i++) { |
| 1679 | if (argv[i][0] != '-') { | 1912 | if (argv[i][0] != '-') { |
| 1680 | mem = atoi(argv[i]) * 1024 * 1024; | 1913 | mem = atoi(argv[i]) * 1024 * 1024; |
| 1681 | /* We start by mapping anonymous pages over all of | 1914 | /* |
| 1915 | * We start by mapping anonymous pages over all of | ||
| 1682 | * guest-physical memory range. This fills it with 0, | 1916 | * guest-physical memory range. This fills it with 0, |
| 1683 | * and ensures that the Guest won't be killed when it | 1917 | * and ensures that the Guest won't be killed when it |
| 1684 | * tries to access it. */ | 1918 | * tries to access it. |
| 1919 | */ | ||
| 1685 | guest_base = map_zeroed_pages(mem / getpagesize() | 1920 | guest_base = map_zeroed_pages(mem / getpagesize() |
| 1686 | + DEVICE_PAGES); | 1921 | + DEVICE_PAGES); |
| 1687 | guest_limit = mem; | 1922 | guest_limit = mem; |
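map_zeroed_pages itself lives elsewhere in the file; one plausible reduced version of the idea (a sketch, not the Launcher's actual helper) is an anonymous, private, no-reserve mmap, which comes back zero-filled for free:

        #include <err.h>
        #include <sys/mman.h>
        #include <unistd.h>

        /* Map 'num' zero-filled pages and return the starting address. */
        void *map_zeroed(unsigned long num)
        {
                long page = sysconf(_SC_PAGESIZE);
                void *addr = mmap(NULL, num * page, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

                if (addr == MAP_FAILED)
                        err(1, "mmap of %lu pages", num);
                /* Anonymous mappings start out as zeroes, so the Guest's
                 * "physical" memory is cleared without us touching it. */
                return addr;
        }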
| @@ -1714,8 +1949,10 @@ int main(int argc, char *argv[]) | |||
| 1714 | usage(); | 1949 | usage(); |
| 1715 | } | 1950 | } |
| 1716 | } | 1951 | } |
| 1717 | /* After the other arguments we expect memory and kernel image name, | 1952 | /* |
| 1718 | * followed by command line arguments for the kernel. */ | 1953 | * After the other arguments we expect memory and kernel image name, |
| 1954 | * followed by command line arguments for the kernel. | ||
| 1955 | */ | ||
| 1719 | if (optind + 2 > argc) | 1956 | if (optind + 2 > argc) |
| 1720 | usage(); | 1957 | usage(); |
| 1721 | 1958 | ||
| @@ -1733,20 +1970,26 @@ int main(int argc, char *argv[]) | |||
| 1733 | /* Map the initrd image if requested (at top of physical memory) */ | 1970 | /* Map the initrd image if requested (at top of physical memory) */ |
| 1734 | if (initrd_name) { | 1971 | if (initrd_name) { |
| 1735 | initrd_size = load_initrd(initrd_name, mem); | 1972 | initrd_size = load_initrd(initrd_name, mem); |
| 1736 | /* These are the location in the Linux boot header where the | 1973 | /* |
| 1737 | * start and size of the initrd are expected to be found. */ | 1974 | * These are the location in the Linux boot header where the |
| 1975 | * start and size of the initrd are expected to be found. | ||
| 1976 | */ | ||
| 1738 | boot->hdr.ramdisk_image = mem - initrd_size; | 1977 | boot->hdr.ramdisk_image = mem - initrd_size; |
| 1739 | boot->hdr.ramdisk_size = initrd_size; | 1978 | boot->hdr.ramdisk_size = initrd_size; |
| 1740 | /* The bootloader type 0xFF means "unknown"; that's OK. */ | 1979 | /* The bootloader type 0xFF means "unknown"; that's OK. */ |
| 1741 | boot->hdr.type_of_loader = 0xFF; | 1980 | boot->hdr.type_of_loader = 0xFF; |
| 1742 | } | 1981 | } |
| 1743 | 1982 | ||
| 1744 | /* The Linux boot header contains an "E820" memory map: ours is a | 1983 | /* |
| 1745 | * simple, single region. */ | 1984 | * The Linux boot header contains an "E820" memory map: ours is a |
| 1985 | * simple, single region. | ||
| 1986 | */ | ||
| 1746 | boot->e820_entries = 1; | 1987 | boot->e820_entries = 1; |
| 1747 | boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); | 1988 | boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); |
| 1748 | /* The boot header contains a command line pointer: we put the command | 1989 | /* |
| 1749 | * line after the boot header. */ | 1990 | * The boot header contains a command line pointer: we put the command |
| 1991 | * line after the boot header. | ||
| 1992 | */ | ||
| 1750 | boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); | 1993 | boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); |
| 1751 | /* We use a simple helper to copy the arguments separated by spaces. */ | 1994 | /* We use a simple helper to copy the arguments separated by spaces. */ |
| 1752 | concat((char *)(boot + 1), argv+optind+2); | 1995 | concat((char *)(boot + 1), argv+optind+2); |
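concat() is that simple helper. A minimal sketch of joining arguments with spaces (join_args is an invented name, and it assumes the destination is large enough, as the command-line area after the boot header is):

        #include <stdio.h>
        #include <string.h>

        /* Join the NULL-terminated argument array into 'dst', separated by spaces. */
        static void join_args(char *dst, char *args[])
        {
                unsigned int i;
                size_t off = 0;

                for (i = 0; args[i]; i++) {
                        strcpy(dst + off, args[i]);
                        off += strlen(args[i]);
                        dst[off++] = ' ';
                }
                /* Replace the trailing space (or write the only byte) with a NUL. */
                if (off)
                        off--;
                dst[off] = '\0';
        }

        int main(void)
        {
                char cmdline[64];
                char *args[] = { "root=/dev/vda", "console=hvc0", NULL };

                join_args(cmdline, args);
                printf("%s\n", cmdline);   /* "root=/dev/vda console=hvc0" */
                return 0;
        }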
| @@ -1760,11 +2003,13 @@ int main(int argc, char *argv[]) | |||
| 1760 | /* Tell the entry path not to try to reload segment registers. */ | 2003 | /* Tell the entry path not to try to reload segment registers. */ |
| 1761 | boot->hdr.loadflags |= KEEP_SEGMENTS; | 2004 | boot->hdr.loadflags |= KEEP_SEGMENTS; |
| 1762 | 2005 | ||
| 1763 | /* We tell the kernel to initialize the Guest: this returns the open | 2006 | /* |
| 1764 | * /dev/lguest file descriptor. */ | 2007 | * We tell the kernel to initialize the Guest: this returns the open |
| 2008 | * /dev/lguest file descriptor. | ||
| 2009 | */ | ||
| 1765 | tell_kernel(start); | 2010 | tell_kernel(start); |
| 1766 | 2011 | ||
| 1767 | /* Ensure that we terminate if a child dies. */ | 2012 | /* Ensure that we terminate if a device-servicing child dies. */ |
| 1768 | signal(SIGCHLD, kill_launcher); | 2013 | signal(SIGCHLD, kill_launcher); |
| 1769 | 2014 | ||
| 1770 | /* If we exit via err(), this kills all the threads, restores tty. */ | 2015 | /* If we exit via err(), this kills all the threads, restores tty. */ |
