| -rw-r--r-- | Documentation/lguest/lguest.c | 1143 |
1 files changed, 681 insertions, 462 deletions
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 32c2eaf94c4d..7418f852e40c 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
| @@ -32,7 +32,9 @@ | |||
| 32 | #include <termios.h> | 32 | #include <termios.h> |
| 33 | #include <getopt.h> | 33 | #include <getopt.h> |
| 34 | #include <zlib.h> | 34 | #include <zlib.h> |
| 35 | /*L:110 We can ignore the 28 include files we need for this program, but I do | 35 | #include <assert.h> |
| 36 | #include <sched.h> | ||
| 37 | /*L:110 We can ignore the 30 include files we need for this program, but I do | ||
| 36 | * want to draw attention to the use of kernel-style types. | 38 | * want to draw attention to the use of kernel-style types. |
| 37 | * | 39 | * |
| 38 | * As Linus said, "C is a Spartan language, and so should your naming be." I | 40 | * As Linus said, "C is a Spartan language, and so should your naming be." I |
| @@ -44,6 +46,12 @@ typedef uint32_t u32; | |||
| 44 | typedef uint16_t u16; | 46 | typedef uint16_t u16; |
| 45 | typedef uint8_t u8; | 47 | typedef uint8_t u8; |
| 46 | #include "linux/lguest_launcher.h" | 48 | #include "linux/lguest_launcher.h" |
| 49 | #include "linux/pci_ids.h" | ||
| 50 | #include "linux/virtio_config.h" | ||
| 51 | #include "linux/virtio_net.h" | ||
| 52 | #include "linux/virtio_blk.h" | ||
| 53 | #include "linux/virtio_console.h" | ||
| 54 | #include "linux/virtio_ring.h" | ||
| 47 | #include "asm-x86/e820.h" | 55 | #include "asm-x86/e820.h" |
| 48 | /*:*/ | 56 | /*:*/ |
| 49 | 57 | ||
| @@ -55,6 +63,8 @@ typedef uint8_t u8; | |||
| 55 | #endif | 63 | #endif |
| 56 | /* We can have up to 256 pages for devices. */ | 64 | /* We can have up to 256 pages for devices. */ |
| 57 | #define DEVICE_PAGES 256 | 65 | #define DEVICE_PAGES 256 |
| 66 | /* This fits nicely in a single 4096-byte page. */ | ||
| 67 | #define VIRTQUEUE_NUM 127 | ||
| 58 | 68 | ||
| 59 | /*L:120 verbose is both a global flag and a macro. The C preprocessor allows | 69 | /*L:120 verbose is both a global flag and a macro. The C preprocessor allows |
| 60 | * this, and although I wouldn't recommend it, it works quite nicely here. */ | 70 | * this, and although I wouldn't recommend it, it works quite nicely here. */ |
| @@ -78,8 +88,17 @@ struct device_list | |||
| 78 | fd_set infds; | 88 | fd_set infds; |
| 79 | int max_infd; | 89 | int max_infd; |
| 80 | 90 | ||
| 91 | /* Counter to assign interrupt numbers. */ | ||
| 92 | unsigned int next_irq; | ||
| 93 | |||
| 94 | /* Counter to print out convenient device numbers. */ | ||
| 95 | unsigned int device_num; | ||
| 96 | |||
| 81 | /* The descriptor page for the devices. */ | 97 | /* The descriptor page for the devices. */ |
| 82 | struct lguest_device_desc *descs; | 98 | u8 *descpage; |
| 99 | |||
| 100 | /* The tail of the last descriptor. */ | ||
| 101 | unsigned int desc_used; | ||
| 83 | 102 | ||
| 84 | /* A single linked list of devices. */ | 103 | /* A single linked list of devices. */ |
| 85 | struct device *dev; | 104 | struct device *dev; |
| @@ -87,31 +106,88 @@ struct device_list | |||
| 87 | struct device **lastdev; | 106 | struct device **lastdev; |
| 88 | }; | 107 | }; |
| 89 | 108 | ||
| 109 | /* The list of Guest devices, based on command line arguments. */ | ||
| 110 | static struct device_list devices; | ||
| 111 | |||
| 90 | /* The device structure describes a single device. */ | 112 | /* The device structure describes a single device. */ |
| 91 | struct device | 113 | struct device |
| 92 | { | 114 | { |
| 93 | /* The linked-list pointer. */ | 115 | /* The linked-list pointer. */ |
| 94 | struct device *next; | 116 | struct device *next; |
| 95 | /* The descriptor for this device, as mapped into the Guest. */ | 117 | |
| 118 | /* This device's descriptor, as mapped into the Guest. */ | ||
| 96 | struct lguest_device_desc *desc; | 119 | struct lguest_device_desc *desc; |
| 97 | /* The memory page(s) of this device, if any. Also mapped in Guest. */ | 120 | |
| 98 | void *mem; | 121 | /* The name of this device, for --verbose. */ |
| 122 | const char *name; | ||
| 99 | 123 | ||
| 100 | /* If handle_input is set, it wants to be called when this file | 124 | /* If handle_input is set, it wants to be called when this file |
| 101 | * descriptor is ready. */ | 125 | * descriptor is ready. */ |
| 102 | int fd; | 126 | int fd; |
| 103 | bool (*handle_input)(int fd, struct device *me); | 127 | bool (*handle_input)(int fd, struct device *me); |
| 104 | 128 | ||
| 105 | /* If handle_output is set, it wants to be called when the Guest sends | 129 | /* Any queues attached to this device */ |
| 106 | * DMA to this key. */ | 130 | struct virtqueue *vq; |
| 107 | unsigned long watch_key; | ||
| 108 | u32 (*handle_output)(int fd, const struct iovec *iov, | ||
| 109 | unsigned int num, struct device *me); | ||
| 110 | 131 | ||
| 111 | /* Device-specific data. */ | 132 | /* Device-specific data. */ |
| 112 | void *priv; | 133 | void *priv; |
| 113 | }; | 134 | }; |
| 114 | 135 | ||
| 136 | /* The virtqueue structure describes a queue attached to a device. */ | ||
| 137 | struct virtqueue | ||
| 138 | { | ||
| 139 | struct virtqueue *next; | ||
| 140 | |||
| 141 | /* Which device owns me. */ | ||
| 142 | struct device *dev; | ||
| 143 | |||
| 144 | /* The configuration for this queue. */ | ||
| 145 | struct lguest_vqconfig config; | ||
| 146 | |||
| 147 | /* The actual ring of buffers. */ | ||
| 148 | struct vring vring; | ||
| 149 | |||
| 150 | /* Last available index we saw. */ | ||
| 151 | u16 last_avail_idx; | ||
| 152 | |||
| 153 | /* The routine to call when the Guest pings us. */ | ||
| 154 | void (*handle_output)(int fd, struct virtqueue *me); | ||
| 155 | }; | ||
| 156 | |||
| 157 | /* Since the Guest is UP (uniprocessor) and we don't run at the same time, we don't need barriers. | ||
| 158 | * But I include them in the code in case others copy it. */ | ||
| 159 | #define wmb() | ||
| 160 | |||
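The barrier is easiest to appreciate from the other side of the ring. Below is a minimal, hypothetical sketch of how a Guest offers one writable buffer (the real work happens in the Guest's own virtio ring code; the function name here is invented). The descriptor contents must be visible before the available index moves, which is exactly the ordering wmb() would enforce on a multiprocessor:

        static void guest_offer_buffer(struct vring *vr, unsigned int desc_num,
                                       unsigned long addr, unsigned int len)
        {
                /* Describe the buffer: where it is, how big, and that the
                 * Host is allowed to write into it. */
                vr->desc[desc_num].addr = addr;
                vr->desc[desc_num].len = len;
                vr->desc[desc_num].flags = VRING_DESC_F_WRITE;

                /* Publish it in the available ring... */
                vr->avail->ring[vr->avail->idx % vr->num] = desc_num;
                /* ...and only then move the index which the Host watches. */
                wmb();
                vr->avail->idx++;
        }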
| 161 | /* Convert an iovec element to the given type. | ||
| 162 | * | ||
| 163 | * This is a fairly ugly trick: we need to know the size of the type and | ||
| 164 | * alignment requirement to check the pointer is kosher. It's also nice to | ||
| 165 | * have the name of the type in case we report failure. | ||
| 166 | * | ||
| 167 | * Typing those three things all the time is cumbersome and error prone, so we | ||
| 168 | * have a macro which sets them all up and passes to the real function. */ | ||
| 169 | #define convert(iov, type) \ | ||
| 170 | ((type *)_convert((iov), sizeof(type), __alignof__(type), #type)) | ||
| 171 | |||
| 172 | static void *_convert(struct iovec *iov, size_t size, size_t align, | ||
| 173 | const char *name) | ||
| 174 | { | ||
| 175 | if (iov->iov_len != size) | ||
| 176 | errx(1, "Bad iovec size %zu for %s", iov->iov_len, name); | ||
| 177 | if ((unsigned long)iov->iov_base % align != 0) | ||
| 178 | errx(1, "Bad alignment %p for %s", iov->iov_base, name); | ||
| 179 | return iov->iov_base; | ||
| 180 | } | ||
| 181 | |||
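To make the trick concrete, here is a small sketch (the helper name is invented) of the way the network handlers further down use convert(): viewing the first iovec element as a struct virtio_net_hdr, and dying if the Guest supplied a buffer of the wrong size or alignment:

        static void check_net_header(struct iovec *iov)
        {
                struct virtio_net_hdr *hdr;

                /* errx()s with the type name if iov[0] isn't exactly a header. */
                hdr = convert(&iov[0], struct virtio_net_hdr);
                if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE)
                        errx(1, "Unexpected GSO type %u", hdr->gso_type);
        }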
| 182 | /* The virtio configuration space is defined to be little-endian. x86 is | ||
| 183 | * little-endian too, but it's nice to be explicit so we have these helpers. */ | ||
| 184 | #define cpu_to_le16(v16) (v16) | ||
| 185 | #define cpu_to_le32(v32) (v32) | ||
| 186 | #define cpu_to_le64(v64) (v64) | ||
| 187 | #define le16_to_cpu(v16) (v16) | ||
| 188 | #define le32_to_cpu(v32) (v32) | ||
| 189 | #define le64_to_cpu(v64) (v64) | ||
| 190 | |||
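These helpers matter when multi-byte values are placed into a device's config space (see add_desc_field() further down). A hedged sketch, with an invented config field type, of storing a 64-bit value so a Guest of either endianness would read it correctly:

        static void put_capacity_field(struct device *dev, uint64_t sectors)
        {
                uint64_t capacity = cpu_to_le64(sectors);

                /* 0x40 is an invented config field type, just for illustration. */
                add_desc_field(dev, 0x40, sizeof(capacity), &capacity);
        }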
| 115 | /*L:100 The Launcher code itself takes us out into userspace, that scary place | 191 | /*L:100 The Launcher code itself takes us out into userspace, that scary place |
| 116 | * where pointers run wild and free! Unfortunately, like most userspace | 192 | * where pointers run wild and free! Unfortunately, like most userspace |
| 117 | * programs, it's quite boring (which is why everyone likes to hack on the | 193 | * programs, it's quite boring (which is why everyone likes to hack on the |
| @@ -486,11 +562,11 @@ static int tell_kernel(unsigned long pgdir, unsigned long start) | |||
| 486 | } | 562 | } |
| 487 | /*:*/ | 563 | /*:*/ |
| 488 | 564 | ||
| 489 | static void set_fd(int fd, struct device_list *devices) | 565 | static void add_device_fd(int fd) |
| 490 | { | 566 | { |
| 491 | FD_SET(fd, &devices->infds); | 567 | FD_SET(fd, &devices.infds); |
| 492 | if (fd > devices->max_infd) | 568 | if (fd > devices.max_infd) |
| 493 | devices->max_infd = fd; | 569 | devices.max_infd = fd; |
| 494 | } | 570 | } |
| 495 | 571 | ||
| 496 | /*L:200 | 572 | /*L:200 |
| @@ -508,18 +584,18 @@ static void set_fd(int fd, struct device_list *devices) | |||
| 508 | * | 584 | * |
| 509 | * This, of course, is merely a different *kind* of icky. | 585 | * This, of course, is merely a different *kind* of icky. |
| 510 | */ | 586 | */ |
| 511 | static void wake_parent(int pipefd, int lguest_fd, struct device_list *devices) | 587 | static void wake_parent(int pipefd, int lguest_fd) |
| 512 | { | 588 | { |
| 513 | /* Add the pipe from the Launcher to the fdset in the device_list, so | 589 | /* Add the pipe from the Launcher to the fdset in the device_list, so |
| 514 | * we watch it, too. */ | 590 | * we watch it, too. */ |
| 515 | set_fd(pipefd, devices); | 591 | add_device_fd(pipefd); |
| 516 | 592 | ||
| 517 | for (;;) { | 593 | for (;;) { |
| 518 | fd_set rfds = devices->infds; | 594 | fd_set rfds = devices.infds; |
| 519 | unsigned long args[] = { LHREQ_BREAK, 1 }; | 595 | unsigned long args[] = { LHREQ_BREAK, 1 }; |
| 520 | 596 | ||
| 521 | /* Wait until input is ready from one of the devices. */ | 597 | /* Wait until input is ready from one of the devices. */ |
| 522 | select(devices->max_infd+1, &rfds, NULL, NULL, NULL); | 598 | select(devices.max_infd+1, &rfds, NULL, NULL, NULL); |
| 523 | /* Is it a message from the Launcher? */ | 599 | /* Is it a message from the Launcher? */ |
| 524 | if (FD_ISSET(pipefd, &rfds)) { | 600 | if (FD_ISSET(pipefd, &rfds)) { |
| 525 | int ignorefd; | 601 | int ignorefd; |
| @@ -530,14 +606,14 @@ static void wake_parent(int pipefd, int lguest_fd, struct device_list *devices) | |||
| 530 | /* Otherwise it's telling us there's a problem with one | 606 | /* Otherwise it's telling us there's a problem with one |
| 531 | * of the devices, and we should ignore that file | 607 | * of the devices, and we should ignore that file |
| 532 | * descriptor from now on. */ | 608 | * descriptor from now on. */ |
| 533 | FD_CLR(ignorefd, &devices->infds); | 609 | FD_CLR(ignorefd, &devices.infds); |
| 534 | } else /* Send LHREQ_BREAK command. */ | 610 | } else /* Send LHREQ_BREAK command. */ |
| 535 | write(lguest_fd, args, sizeof(args)); | 611 | write(lguest_fd, args, sizeof(args)); |
| 536 | } | 612 | } |
| 537 | } | 613 | } |
| 538 | 614 | ||
| 539 | /* This routine just sets up a pipe to the Waker process. */ | 615 | /* This routine just sets up a pipe to the Waker process. */ |
| 540 | static int setup_waker(int lguest_fd, struct device_list *device_list) | 616 | static int setup_waker(int lguest_fd) |
| 541 | { | 617 | { |
| 542 | int pipefd[2], child; | 618 | int pipefd[2], child; |
| 543 | 619 | ||
| @@ -551,7 +627,7 @@ static int setup_waker(int lguest_fd, struct device_list *device_list) | |||
| 551 | if (child == 0) { | 627 | if (child == 0) { |
| 552 | /* Close the "writing" end of our copy of the pipe */ | 628 | /* Close the "writing" end of our copy of the pipe */ |
| 553 | close(pipefd[1]); | 629 | close(pipefd[1]); |
| 554 | wake_parent(pipefd[0], lguest_fd, device_list); | 630 | wake_parent(pipefd[0], lguest_fd); |
| 555 | } | 631 | } |
| 556 | /* Close the reading end of our copy of the pipe. */ | 632 | /* Close the reading end of our copy of the pipe. */ |
| 557 | close(pipefd[0]); | 633 | close(pipefd[0]); |
| @@ -574,7 +650,7 @@ static void *_check_pointer(unsigned long addr, unsigned int size, | |||
| 574 | /* We have to separately check addr and addr+size, because size could | 650 | /* We have to separately check addr and addr+size, because size could |
| 575 | * be huge and addr + size might wrap around. */ | 651 | * be huge and addr + size might wrap around. */ |
| 576 | if (addr >= guest_limit || addr + size >= guest_limit) | 652 | if (addr >= guest_limit || addr + size >= guest_limit) |
| 577 | errx(1, "%s:%i: Invalid address %li", __FILE__, line, addr); | 653 | errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr); |
| 578 | /* We return a pointer for the caller's convenience, now we know it's | 654 | /* We return a pointer for the caller's convenience, now we know it's |
| 579 | * safe to use. */ | 655 | * safe to use. */ |
| 580 | return from_guest_phys(addr); | 656 | return from_guest_phys(addr); |
| @@ -582,74 +658,131 @@ static void *_check_pointer(unsigned long addr, unsigned int size, | |||
| 582 | /* A macro which transparently hands the line number to the real function. */ | 658 | /* A macro which transparently hands the line number to the real function. */ |
| 583 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) | 659 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) |
| 584 | 660 | ||
| 585 | /* The Guest has given us the address of a "struct lguest_dma". We check it's | 661 | /* This simply sets up an iovec array where we can put data to be discarded. |
| 586 | * OK and convert it to an iovec (which is a simple array of ptr/size | 662 | * This happens when the Guest doesn't want or can't handle the input: we have |
| 587 | * pairs). */ | 663 | * to get rid of it somewhere, and if we bury it in the ceiling space it will |
| 588 | static u32 *dma2iov(unsigned long dma, struct iovec iov[], unsigned *num) | 664 | * start to smell after a week. */ |
| 665 | static void discard_iovec(struct iovec *iov, unsigned int *num) | ||
| 589 | { | 666 | { |
| 590 | unsigned int i; | 667 | static char discard_buf[1024]; |
| 591 | struct lguest_dma *udma; | 668 | *num = 1; |
| 592 | 669 | iov->iov_base = discard_buf; | |
| 593 | /* First we make sure that the array memory itself is valid. */ | 670 | iov->iov_len = sizeof(discard_buf); |
| 594 | udma = check_pointer(dma, sizeof(*udma)); | 671 | } |
| 595 | /* Now we check each element */ | ||
| 596 | for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) { | ||
| 597 | /* A zero length ends the array. */ | ||
| 598 | if (!udma->len[i]) | ||
| 599 | break; | ||
| 600 | 672 | ||
| 601 | iov[i].iov_base = check_pointer(udma->addr[i], udma->len[i]); | 673 | /* This function returns the next descriptor in the chain, or vq->vring.num. */ |
| 602 | iov[i].iov_len = udma->len[i]; | 674 | static unsigned next_desc(struct virtqueue *vq, unsigned int i) |
| 603 | } | 675 | { |
| 604 | *num = i; | 676 | unsigned int next; |
| 677 | |||
| 678 | /* If this descriptor says it doesn't chain, we're done. */ | ||
| 679 | if (!(vq->vring.desc[i].flags & VRING_DESC_F_NEXT)) | ||
| 680 | return vq->vring.num; | ||
| 681 | |||
| 682 | /* Check they're not leading us off the end of the descriptors. */ | ||
| 683 | next = vq->vring.desc[i].next; | ||
| 684 | /* Make sure compiler knows to grab that: we don't want it changing! */ | ||
| 685 | wmb(); | ||
| 686 | |||
| 687 | if (next >= vq->vring.num) | ||
| 688 | errx(1, "Desc next is %u", next); | ||
| 689 | |||
| 690 | return next; | ||
| 691 | } | ||
| 692 | |||
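For illustration, here is a hypothetical Guest-side sketch of building the kind of two-part chain which get_vq_desc() below unpacks: readable (output) descriptors first, then writable (input) ones, linked together with VRING_DESC_F_NEXT:

        static void guest_build_chain(struct vring *vr, unsigned int head,
                                      unsigned long out_addr, unsigned int out_len,
                                      unsigned long in_addr, unsigned int in_len)
        {
                /* First descriptor: data for the Host to read, chained onwards. */
                vr->desc[head].addr = out_addr;
                vr->desc[head].len = out_len;
                vr->desc[head].flags = VRING_DESC_F_NEXT;
                vr->desc[head].next = head + 1;

                /* Second descriptor: a buffer the Host may fill in; ends the chain. */
                vr->desc[head + 1].addr = in_addr;
                vr->desc[head + 1].len = in_len;
                vr->desc[head + 1].flags = VRING_DESC_F_WRITE;
        }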
| 693 | /* This looks in the virtqueue for the first available buffer, and converts | ||
| 694 | * it to an iovec for convenient access. Since descriptors consist of some | ||
| 695 | * number of output then some number of input descriptors, it's actually two | ||
| 696 | * iovecs, but we pack them into one and note how many of each there were. | ||
| 697 | * | ||
| 698 | * This function returns the descriptor number found, or vq->vring.num (which | ||
| 699 | * is never a valid descriptor number) if none was found. */ | ||
| 700 | static unsigned get_vq_desc(struct virtqueue *vq, | ||
| 701 | struct iovec iov[], | ||
| 702 | unsigned int *out_num, unsigned int *in_num) | ||
| 703 | { | ||
| 704 | unsigned int i, head; | ||
| 705 | |||
| 706 | /* Check it isn't doing very strange things with descriptor numbers. */ | ||
| 707 | if ((u16)(vq->vring.avail->idx - vq->last_avail_idx) > vq->vring.num) | ||
| 708 | errx(1, "Guest moved used index from %u to %u", | ||
| 709 | vq->last_avail_idx, vq->vring.avail->idx); | ||
| 710 | |||
| 711 | /* If there's nothing new since last we looked, return invalid. */ | ||
| 712 | if (vq->vring.avail->idx == vq->last_avail_idx) | ||
| 713 | return vq->vring.num; | ||
| 714 | |||
| 715 | /* Grab the next descriptor number they're advertising, and increment | ||
| 716 | * the index we've seen. */ | ||
| 717 | head = vq->vring.avail->ring[vq->last_avail_idx++ % vq->vring.num]; | ||
| 718 | |||
| 719 | /* If their number is silly, that's a fatal mistake. */ | ||
| 720 | if (head >= vq->vring.num) | ||
| 721 | errx(1, "Guest says index %u is available", head); | ||
| 722 | |||
| 723 | /* When we start there are no input or output descriptors yet. */ | ||
| 724 | *out_num = *in_num = 0; | ||
| 725 | |||
| 726 | i = head; | ||
| 727 | do { | ||
| 728 | /* Grab this descriptor, and check it's OK. */ | ||
| 729 | iov[*out_num + *in_num].iov_len = vq->vring.desc[i].len; | ||
| 730 | iov[*out_num + *in_num].iov_base | ||
| 731 | = check_pointer(vq->vring.desc[i].addr, | ||
| 732 | vq->vring.desc[i].len); | ||
| 733 | /* If this is an input descriptor, increment that count. */ | ||
| 734 | if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE) | ||
| 735 | (*in_num)++; | ||
| 736 | else { | ||
| 737 | /* If it's an output descriptor, they're all supposed | ||
| 738 | * to come before any input descriptors. */ | ||
| 739 | if (*in_num) | ||
| 740 | errx(1, "Descriptor has out after in"); | ||
| 741 | (*out_num)++; | ||
| 742 | } | ||
| 743 | |||
| 744 | /* If we've got too many, that implies a descriptor loop. */ | ||
| 745 | if (*out_num + *in_num > vq->vring.num) | ||
| 746 | errx(1, "Looped descriptor"); | ||
| 747 | } while ((i = next_desc(vq, i)) != vq->vring.num); | ||
| 605 | 748 | ||
| 606 | /* We return the pointer to where the caller should write the amount of | 749 | return head; |
| 607 | * the buffer used. */ | ||
| 608 | return &udma->used_len; | ||
| 609 | } | 750 | } |
| 610 | 751 | ||
| 611 | /* This routine gets a DMA buffer from the Guest for a given key, and converts | 752 | /* Once we've used one of their buffers, we tell them about it. We'll then |
| 612 | * it to an iovec array. It returns the interrupt the Guest wants when we're | 753 | * want to send them an interrupt, using trigger_irq(). */ |
| 613 | * finished, and a pointer to the "used_len" field to fill in. */ | 754 | static void add_used(struct virtqueue *vq, unsigned int head, int len) |
| 614 | static u32 *get_dma_buffer(int fd, void *key, | ||
| 615 | struct iovec iov[], unsigned int *num, u32 *irq) | ||
| 616 | { | 755 | { |
| 617 | unsigned long buf[] = { LHREQ_GETDMA, to_guest_phys(key) }; | 756 | struct vring_used_elem *used; |
| 618 | unsigned long udma; | 757 | |
| 619 | u32 *res; | 758 | /* Get a pointer to the next entry in the used ring. */ |
| 620 | 759 | used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; | |
| 621 | /* Ask the kernel for a DMA buffer corresponding to this key. */ | 760 | used->id = head; |
| 622 | udma = write(fd, buf, sizeof(buf)); | 761 | used->len = len; |
| 623 | /* They haven't registered any, or they're all used? */ | 762 | /* Make sure buffer is written before we update index. */ |
| 624 | if (udma == (unsigned long)-1) | 763 | wmb(); |
| 625 | return NULL; | 764 | vq->vring.used->idx++; |
| 626 | |||
| 627 | /* Convert it into our iovec array */ | ||
| 628 | res = dma2iov(udma, iov, num); | ||
| 629 | /* The kernel stashes irq in ->used_len to get it out to us. */ | ||
| 630 | *irq = *res; | ||
| 631 | /* Return a pointer to ((struct lguest_dma *)udma)->used_len. */ | ||
| 632 | return res; | ||
| 633 | } | 765 | } |
| 634 | 766 | ||
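The Guest-side mirror of add_used() is worth sketching too (hypothetical code; the Guest's own virtio ring implementation does this for real): it compares its last-seen counter against used->idx, then reads back the id/len pair we filled in above:

        static bool guest_get_used(struct vring *vr, u16 *last_used,
                                   unsigned int *id, unsigned int *len)
        {
                struct vring_used_elem *e;

                /* Nothing new since we last looked? */
                if (*last_used == vr->used->idx)
                        return false;

                e = &vr->used->ring[(*last_used)++ % vr->num];
                *id = e->id;
                *len = e->len;
                return true;
        }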
| 635 | /* This is a convenient routine to send the Guest an interrupt. */ | 767 | /* This actually sends the interrupt for this virtqueue */ |
| 636 | static void trigger_irq(int fd, u32 irq) | 768 | static void trigger_irq(int fd, struct virtqueue *vq) |
| 637 | { | 769 | { |
| 638 | unsigned long buf[] = { LHREQ_IRQ, irq }; | 770 | unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; |
| 771 | |||
| 772 | if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) | ||
| 773 | return; | ||
| 774 | |||
| 775 | /* Send the Guest an interrupt to tell them we used something up. */ | ||
| 639 | if (write(fd, buf, sizeof(buf)) != 0) | 776 | if (write(fd, buf, sizeof(buf)) != 0) |
| 640 | err(1, "Triggering irq %i", irq); | 777 | err(1, "Triggering irq %i", vq->config.irq); |
| 641 | } | 778 | } |
| 642 | 779 | ||
| 643 | /* This simply sets up an iovec array where we can put data to be discarded. | 780 | /* And here's the combo meal deal. Supersize me! */ |
| 644 | * This happens when the Guest doesn't want or can't handle the input: we have | 781 | static void add_used_and_trigger(int fd, struct virtqueue *vq, |
| 645 | * to get rid of it somewhere, and if we bury it in the ceiling space it will | 782 | unsigned int head, int len) |
| 646 | * start to smell after a week. */ | ||
| 647 | static void discard_iovec(struct iovec *iov, unsigned int *num) | ||
| 648 | { | 783 | { |
| 649 | static char discard_buf[1024]; | 784 | add_used(vq, head, len); |
| 650 | *num = 1; | 785 | trigger_irq(fd, vq); |
| 651 | iov->iov_base = discard_buf; | ||
| 652 | iov->iov_len = sizeof(discard_buf); | ||
| 653 | } | 786 | } |
| 654 | 787 | ||
| 655 | /* Here is the input terminal setting we save, and the routine to restore them | 788 | /* Here is the input terminal setting we save, and the routine to restore them |
| @@ -672,38 +805,37 @@ struct console_abort | |||
| 672 | /* This is the routine which handles console input (ie. stdin). */ | 805 | /* This is the routine which handles console input (ie. stdin). */ |
| 673 | static bool handle_console_input(int fd, struct device *dev) | 806 | static bool handle_console_input(int fd, struct device *dev) |
| 674 | { | 807 | { |
| 675 | u32 irq = 0, *lenp; | ||
| 676 | int len; | 808 | int len; |
| 677 | unsigned int num; | 809 | unsigned int head, in_num, out_num; |
| 678 | struct iovec iov[LGUEST_MAX_DMA_SECTIONS]; | 810 | struct iovec iov[dev->vq->vring.num]; |
| 679 | struct console_abort *abort = dev->priv; | 811 | struct console_abort *abort = dev->priv; |
| 680 | 812 | ||
| 681 | /* First we get the console buffer from the Guest. The key is dev->mem | 813 | /* First we need a console buffer from the Guest's input virtqueue. */ |
| 682 | * which was set to 0 in setup_console(). */ | 814 | head = get_vq_desc(dev->vq, iov, &out_num, &in_num); |
| 683 | lenp = get_dma_buffer(fd, dev->mem, iov, &num, &irq); | 815 | if (head == dev->vq->vring.num) { |
| 684 | if (!lenp) { | 816 | /* If they're not ready for input, we warn and set up to |
| 685 | /* If it's not ready for input, warn and set up to discard. */ | 817 | * discard. */ |
| 686 | warn("console: no dma buffer!"); | 818 | warnx("console: no dma buffer!"); |
| 687 | discard_iovec(iov, &num); | 819 | discard_iovec(iov, &in_num); |
| 688 | } | 820 | } else if (out_num) |
| 821 | errx(1, "Output buffers in console in queue?"); | ||
| 689 | 822 | ||
| 690 | /* This is why we convert to iovecs: the readv() call uses them, and so | 823 | /* This is why we convert to iovecs: the readv() call uses them, and so |
| 691 | * it reads straight into the Guest's buffer. */ | 824 | * it reads straight into the Guest's buffer. */ |
| 692 | len = readv(dev->fd, iov, num); | 825 | len = readv(dev->fd, iov, in_num); |
| 693 | if (len <= 0) { | 826 | if (len <= 0) { |
| 694 | /* This implies that the console is closed, is /dev/null, or | 827 | /* This implies that the console is closed, is /dev/null, or |
| 695 | * something went terribly wrong. We still go through the rest | 828 | * something went terribly wrong. */ |
| 696 | * of the logic, though, especially the exit handling below. */ | ||
| 697 | warnx("Failed to get console input, ignoring console."); | 829 | warnx("Failed to get console input, ignoring console."); |
| 698 | len = 0; | 830 | /* Put the input terminal back and return failure (meaning, |
| 831 | * don't call us again). */ | ||
| 832 | restore_term(); | ||
| 833 | return false; | ||
| 699 | } | 834 | } |
| 700 | 835 | ||
| 701 | /* If we read the data into the Guest, fill in the length and send the | 836 | /* If we actually read the data into the Guest, tell them about it. */ |
| 702 | * interrupt. */ | 837 | if (head != dev->vq->vring.num) |
| 703 | if (lenp) { | 838 | add_used_and_trigger(fd, dev->vq, head, len); |
| 704 | *lenp = len; | ||
| 705 | trigger_irq(fd, irq); | ||
| 706 | } | ||
| 707 | 839 | ||
| 708 | /* Three ^C within one second? Exit. | 840 | /* Three ^C within one second? Exit. |
| 709 | * | 841 | * |
| @@ -732,202 +864,137 @@ static bool handle_console_input(int fd, struct device *dev) | |||
| 732 | /* Any other key resets the abort counter. */ | 864 | /* Any other key resets the abort counter. */ |
| 733 | abort->count = 0; | 865 | abort->count = 0; |
| 734 | 866 | ||
| 735 | /* Now, if we didn't read anything, put the input terminal back and | ||
| 736 | * return failure (meaning, don't call us again). */ | ||
| 737 | if (!len) { | ||
| 738 | restore_term(); | ||
| 739 | return false; | ||
| 740 | } | ||
| 741 | /* Everything went OK! */ | 867 | /* Everything went OK! */ |
| 742 | return true; | 868 | return true; |
| 743 | } | 869 | } |
| 744 | 870 | ||
| 745 | /* Handling console output is much simpler than input. */ | 871 | /* Handling output for console is simple: we just get all the output buffers |
| 746 | static u32 handle_console_output(int fd, const struct iovec *iov, | 872 | * and write them to stdout. */ |
| 747 | unsigned num, struct device*dev) | 873 | static void handle_console_output(int fd, struct virtqueue *vq) |
| 748 | { | 874 | { |
| 749 | /* Whatever the Guest sends, write it to standard output. Return the | 875 | unsigned int head, out, in; |
| 750 | * number of bytes written. */ | 876 | int len; |
| 751 | return writev(STDOUT_FILENO, iov, num); | 877 | struct iovec iov[vq->vring.num]; |
| 752 | } | 878 | |
| 753 | 879 | /* Keep getting output buffers from the Guest until we run out. */ | |
| 754 | /* Guest->Host network output is also pretty easy. */ | 880 | while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) { |
| 755 | static u32 handle_tun_output(int fd, const struct iovec *iov, | 881 | if (in) |
| 756 | unsigned num, struct device *dev) | 882 | errx(1, "Input buffers in output queue?"); |
| 757 | { | 883 | len = writev(STDOUT_FILENO, iov, out); |
| 758 | /* We put a flag in the "priv" pointer of the network device, and set | 884 | add_used_and_trigger(fd, vq, head, len); |
| 759 | * it as soon as we see output. We'll see why in handle_tun_input() */ | 885 | } |
| 760 | *(bool *)dev->priv = true; | ||
| 761 | /* Whatever packet the Guest sent us, write it out to the tun | ||
| 762 | * device. */ | ||
| 763 | return writev(dev->fd, iov, num); | ||
| 764 | } | 886 | } |
| 765 | 887 | ||
| 766 | /* This matches the peer_key() in lguest_net.c. The key for any given slot | 888 | /* Handling output for network is also simple: we get all the output buffers |
| 767 | * is the address of the network device's page plus 4 * the slot number. */ | 889 | * and write them (ignoring the first element) to this device's file descriptor |
| 768 | static unsigned long peer_offset(unsigned int peernum) | 890 | * (the tun device). */
| 891 | static void handle_net_output(int fd, struct virtqueue *vq) | ||
| 769 | { | 892 | { |
| 770 | return 4 * peernum; | 893 | unsigned int head, out, in; |
| 894 | int len; | ||
| 895 | struct iovec iov[vq->vring.num]; | ||
| 896 | |||
| 897 | /* Keep getting output buffers from the Guest until we run out. */ | ||
| 898 | while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) { | ||
| 899 | if (in) | ||
| 900 | errx(1, "Input buffers in output queue?"); | ||
| 901 | /* Check header, but otherwise ignore it (we said we supported | ||
| 902 | * no features). */ | ||
| 903 | (void)convert(&iov[0], struct virtio_net_hdr); | ||
| 904 | len = writev(vq->dev->fd, iov+1, out-1); | ||
| 905 | add_used_and_trigger(fd, vq, head, len); | ||
| 906 | } | ||
| 771 | } | 907 | } |
| 772 | 908 | ||
| 773 | /* This is where we handle a packet coming in from the tun device */ | 909 | /* This is where we handle a packet coming in from the tun device to our |
| 910 | * Guest. */ | ||
| 774 | static bool handle_tun_input(int fd, struct device *dev) | 911 | static bool handle_tun_input(int fd, struct device *dev) |
| 775 | { | 912 | { |
| 776 | u32 irq = 0, *lenp; | 913 | unsigned int head, in_num, out_num; |
| 777 | int len; | 914 | int len; |
| 778 | unsigned num; | 915 | struct iovec iov[dev->vq->vring.num]; |
| 779 | struct iovec iov[LGUEST_MAX_DMA_SECTIONS]; | 916 | struct virtio_net_hdr *hdr; |
| 780 | 917 | ||
| 781 | /* First we get a buffer the Guest has bound to its key. */ | 918 | /* First we need a network buffer from the Guest's recv virtqueue. */ |
| 782 | lenp = get_dma_buffer(fd, dev->mem+peer_offset(NET_PEERNUM), iov, &num, | 919 | head = get_vq_desc(dev->vq, iov, &out_num, &in_num); |
| 783 | &irq); | 920 | if (head == dev->vq->vring.num) { |
| 784 | if (!lenp) { | ||
| 785 | /* Now, it's expected that if we try to send a packet too | 921 | /* Now, it's expected that if we try to send a packet too |
| 786 | * early, the Guest won't be ready yet. This is why we set a | 922 | * early, the Guest won't be ready yet. Wait until the device |
| 787 | * flag when the Guest sends its first packet. If it's sent a | 923 | * status says it's ready. */ |
| 788 | * packet we assume it should be ready to receive them. | 924 | /* FIXME: Actually want DRIVER_ACTIVE here. */ |
| 789 | * | 925 | if (dev->desc->status & VIRTIO_CONFIG_S_DRIVER_OK) |
| 790 | * Actually, this is what the status bits in the descriptor are | ||
| 791 | * for: we should *use* them. FIXME! */ | ||
| 792 | if (*(bool *)dev->priv) | ||
| 793 | warn("network: no dma buffer!"); | 926 | warn("network: no dma buffer!"); |
| 794 | discard_iovec(iov, &num); | 927 | discard_iovec(iov, &in_num); |
| 795 | } | 928 | } else if (out_num) |
| 929 | errx(1, "Output buffers in network recv queue?"); | ||
| 930 | |||
| 931 | /* First element is the header: we set it to 0 (no features). */ | ||
| 932 | hdr = convert(&iov[0], struct virtio_net_hdr); | ||
| 933 | hdr->flags = 0; | ||
| 934 | hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; | ||
| 796 | 935 | ||
| 797 | /* Read the packet from the device directly into the Guest's buffer. */ | 936 | /* Read the packet from the device directly into the Guest's buffer. */ |
| 798 | len = readv(dev->fd, iov, num); | 937 | len = readv(dev->fd, iov+1, in_num-1); |
| 799 | if (len <= 0) | 938 | if (len <= 0) |
| 800 | err(1, "reading network"); | 939 | err(1, "reading network"); |
| 801 | 940 | ||
| 802 | /* Write the used_len, and trigger the interrupt for the Guest */ | 941 | /* If we actually read the data into the Guest, tell them about it. */ |
| 803 | if (lenp) { | 942 | if (head != dev->vq->vring.num) |
| 804 | *lenp = len; | 943 | add_used_and_trigger(fd, dev->vq, head, sizeof(*hdr) + len); |
| 805 | trigger_irq(fd, irq); | 944 | |
| 806 | } | ||
| 807 | verbose("tun input packet len %i [%02x %02x] (%s)\n", len, | 945 | verbose("tun input packet len %i [%02x %02x] (%s)\n", len, |
| 808 | ((u8 *)iov[0].iov_base)[0], ((u8 *)iov[0].iov_base)[1], | 946 | ((u8 *)iov[1].iov_base)[0], ((u8 *)iov[1].iov_base)[1], |
| 809 | lenp ? "sent" : "discarded"); | 947 | head != dev->vq->vring.num ? "sent" : "discarded"); |
| 948 | |||
| 810 | /* All good. */ | 949 | /* All good. */ |
| 811 | return true; | 950 | return true; |
| 812 | } | 951 | } |
| 813 | 952 | ||
| 814 | /* The last device handling routine is block output: the Guest has sent a DMA | 953 | /* This is the generic routine we call when the Guest uses LHCALL_NOTIFY. */ |
| 815 | * to the block device. It will have placed the command it wants in the | 954 | static void handle_output(int fd, unsigned long addr) |
| 816 | * "struct lguest_block_page". */ | ||
| 817 | static u32 handle_block_output(int fd, const struct iovec *iov, | ||
| 818 | unsigned num, struct device *dev) | ||
| 819 | { | ||
| 820 | struct lguest_block_page *p = dev->mem; | ||
| 821 | u32 irq, *lenp; | ||
| 822 | unsigned int len, reply_num; | ||
| 823 | struct iovec reply[LGUEST_MAX_DMA_SECTIONS]; | ||
| 824 | off64_t device_len, off = (off64_t)p->sector * 512; | ||
| 825 | |||
| 826 | /* First we extract the device length from the dev->priv pointer. */ | ||
| 827 | device_len = *(off64_t *)dev->priv; | ||
| 828 | |||
| 829 | /* We first check that the read or write is within the length of the | ||
| 830 | * block file. */ | ||
| 831 | if (off >= device_len) | ||
| 832 | errx(1, "Bad offset %llu vs %llu", off, device_len); | ||
| 833 | /* Move to the right location in the block file. This shouldn't fail, | ||
| 834 | * but best to check. */ | ||
| 835 | if (lseek64(dev->fd, off, SEEK_SET) != off) | ||
| 836 | err(1, "Bad seek to sector %i", p->sector); | ||
| 837 | |||
| 838 | verbose("Block: %s at offset %llu\n", p->type ? "WRITE" : "READ", off); | ||
| 839 | |||
| 840 | /* They were supposed to bind a reply buffer at key equal to the start | ||
| 841 | * of the block device memory. We need this to tell them when the | ||
| 842 | * request is finished. */ | ||
| 843 | lenp = get_dma_buffer(fd, dev->mem, reply, &reply_num, &irq); | ||
| 844 | if (!lenp) | ||
| 845 | err(1, "Block request didn't give us a dma buffer"); | ||
| 846 | |||
| 847 | if (p->type) { | ||
| 848 | /* A write request. The DMA they sent contained the data, so | ||
| 849 | * write it out. */ | ||
| 850 | len = writev(dev->fd, iov, num); | ||
| 851 | /* Grr... Now we know how long the "struct lguest_dma" they | ||
| 852 | * sent was, we make sure they didn't try to write over the end | ||
| 853 | * of the block file (possibly extending it). */ | ||
| 854 | if (off + len > device_len) { | ||
| 855 | /* Trim it back to the correct length */ | ||
| 856 | ftruncate64(dev->fd, device_len); | ||
| 857 | /* Die, bad Guest, die. */ | ||
| 858 | errx(1, "Write past end %llu+%u", off, len); | ||
| 859 | } | ||
| 860 | /* The reply length is 0: we just send back an empty DMA to | ||
| 861 | * interrupt them and tell them the write is finished. */ | ||
| 862 | *lenp = 0; | ||
| 863 | } else { | ||
| 864 | /* A read request. They sent an empty DMA to start the | ||
| 865 | * request, and we put the read contents into the reply | ||
| 866 | * buffer. */ | ||
| 867 | len = readv(dev->fd, reply, reply_num); | ||
| 868 | *lenp = len; | ||
| 869 | } | ||
| 870 | |||
| 871 | /* The result is 1 (done), 2 if there was an error (short read or | ||
| 872 | * write). */ | ||
| 873 | p->result = 1 + (p->bytes != len); | ||
| 874 | /* Now tell them we've used their reply buffer. */ | ||
| 875 | trigger_irq(fd, irq); | ||
| 876 | |||
| 877 | /* We're supposed to return the number of bytes of the output buffer we | ||
| 878 | * used. But the block device uses the "result" field instead, so we | ||
| 879 | * don't bother. */ | ||
| 880 | return 0; | ||
| 881 | } | ||
| 882 | |||
| 883 | /* This is the generic routine we call when the Guest sends some DMA out. */ | ||
| 884 | static void handle_output(int fd, unsigned long dma, unsigned long key, | ||
| 885 | struct device_list *devices) | ||
| 886 | { | 955 | { |
| 887 | struct device *i; | 956 | struct device *i; |
| 888 | u32 *lenp; | 957 | struct virtqueue *vq; |
| 889 | struct iovec iov[LGUEST_MAX_DMA_SECTIONS]; | 958 | |
| 890 | unsigned num = 0; | 959 | /* Check each virtqueue. */ |
| 891 | 960 | for (i = devices.dev; i; i = i->next) { | |
| 892 | /* Convert the "struct lguest_dma" they're sending to a "struct | 961 | for (vq = i->vq; vq; vq = vq->next) { |
| 893 | * iovec". */ | 962 | if (vq->config.pfn == addr/getpagesize() |
| 894 | lenp = dma2iov(dma, iov, &num); | 963 | && vq->handle_output) { |
| 895 | 964 | verbose("Output to %s\n", vq->dev->name); | |
| 896 | /* Check each device: if they expect output to this key, tell them to | 965 | vq->handle_output(fd, vq); |
| 897 | * handle it. */ | 966 | return; |
| 898 | for (i = devices->dev; i; i = i->next) { | 967 | } |
| 899 | if (i->handle_output && key == i->watch_key) { | ||
| 900 | /* We write the result straight into the used_len field | ||
| 901 | * for them. */ | ||
| 902 | *lenp = i->handle_output(fd, iov, num, i); | ||
| 903 | return; | ||
| 904 | } | 968 | } |
| 905 | } | 969 | } |
| 906 | 970 | ||
| 907 | /* This can happen: the kernel sends any SEND_DMA which doesn't match | 971 | /* Early console write is done using notify on a nul-terminated string |
| 908 | * another Guest to us. It could be that another Guest just left a | 972 | * in Guest memory. */ |
| 909 | * network, for example. But it's unusual. */ | 973 | if (addr >= guest_limit) |
| 910 | warnx("Pending dma %p, key %p", (void *)dma, (void *)key); | 974 | errx(1, "Bad NOTIFY %#lx", addr); |
| 975 | |||
| 976 | write(STDOUT_FILENO, from_guest_phys(addr), | ||
| 977 | strnlen(from_guest_phys(addr), guest_limit - addr)); | ||
| 911 | } | 978 | } |
| 912 | 979 | ||
| 913 | /* This is called when the waker wakes us up: check for incoming file | 980 | /* This is called when the waker wakes us up: check for incoming file |
| 914 | * descriptors. */ | 981 | * descriptors. */ |
| 915 | static void handle_input(int fd, struct device_list *devices) | 982 | static void handle_input(int fd) |
| 916 | { | 983 | { |
| 917 | /* select() wants a zeroed timeval to mean "don't wait". */ | 984 | /* select() wants a zeroed timeval to mean "don't wait". */ |
| 918 | struct timeval poll = { .tv_sec = 0, .tv_usec = 0 }; | 985 | struct timeval poll = { .tv_sec = 0, .tv_usec = 0 }; |
| 919 | 986 | ||
| 920 | for (;;) { | 987 | for (;;) { |
| 921 | struct device *i; | 988 | struct device *i; |
| 922 | fd_set fds = devices->infds; | 989 | fd_set fds = devices.infds; |
| 923 | 990 | ||
| 924 | /* If nothing is ready, we're done. */ | 991 | /* If nothing is ready, we're done. */ |
| 925 | if (select(devices->max_infd+1, &fds, NULL, NULL, &poll) == 0) | 992 | if (select(devices.max_infd+1, &fds, NULL, NULL, &poll) == 0) |
| 926 | break; | 993 | break; |
| 927 | 994 | ||
| 928 | /* Otherwise, call the device(s) which have readable | 995 | /* Otherwise, call the device(s) which have readable |
| 929 | * file descriptors and a method of handling them. */ | 996 | * file descriptors and a method of handling them. */ |
| 930 | for (i = devices->dev; i; i = i->next) { | 997 | for (i = devices.dev; i; i = i->next) { |
| 931 | if (i->handle_input && FD_ISSET(i->fd, &fds)) { | 998 | if (i->handle_input && FD_ISSET(i->fd, &fds)) { |
| 932 | /* If handle_input() returns false, it means we | 999 | /* If handle_input() returns false, it means we |
| 933 | * should no longer service it. | 1000 | * should no longer service it. |
| @@ -936,7 +1003,7 @@ static void handle_input(int fd, struct device_list *devices) | |||
| 936 | /* Clear it from the set of input file | 1003 | /* Clear it from the set of input file |
| 937 | * descriptors kept at the head of the | 1004 | * descriptors kept at the head of the |
| 938 | * device list. */ | 1005 | * device list. */ |
| 939 | FD_CLR(i->fd, &devices->infds); | 1006 | FD_CLR(i->fd, &devices.infds); |
| 940 | /* Tell waker to ignore it too... */ | 1007 | /* Tell waker to ignore it too... */ |
| 941 | write(waker_fd, &i->fd, sizeof(i->fd)); | 1008 | write(waker_fd, &i->fd, sizeof(i->fd)); |
| 942 | } | 1009 | } |
| @@ -953,43 +1020,93 @@ static void handle_input(int fd, struct device_list *devices) | |||
| 953 | * routines to allocate them. | 1020 | * routines to allocate them. |
| 954 | * | 1021 | * |
| 955 | * This routine allocates a new "struct lguest_device_desc" from descriptor | 1022 | * This routine allocates a new "struct lguest_device_desc" from descriptor |
| 956 | * table in the devices array just above the Guest's normal memory. */ | 1023 | * table just above the Guest's normal memory. It returns a pointer to that |
| 957 | static struct lguest_device_desc * | 1024 | * descriptor. */ |
| 958 | new_dev_desc(struct lguest_device_desc *descs, | 1025 | static struct lguest_device_desc *new_dev_desc(u16 type) |
| 959 | u16 type, u16 features, u16 num_pages) | ||
| 960 | { | 1026 | { |
| 961 | unsigned int i; | 1027 | struct lguest_device_desc *d; |
| 962 | 1028 | ||
| 963 | for (i = 0; i < LGUEST_MAX_DEVICES; i++) { | 1029 | /* We only have one page for all the descriptors. */ |
| 964 | if (!descs[i].type) { | 1030 | if (devices.desc_used + sizeof(*d) > getpagesize()) |
| 965 | descs[i].type = type; | 1031 | errx(1, "Too many devices"); |
| 966 | descs[i].features = features; | 1032 | |
| 967 | descs[i].num_pages = num_pages; | 1033 | /* We don't need to set config_len or status: page is 0 already. */ |
| 968 | /* If they said the device needs memory, we allocate | 1034 | d = (void *)devices.descpage + devices.desc_used; |
| 969 | * that now. */ | 1035 | d->type = type; |
| 970 | if (num_pages) { | 1036 | devices.desc_used += sizeof(*d); |
| 971 | unsigned long pa; | 1037 | |
| 972 | pa = to_guest_phys(get_pages(num_pages)); | 1038 | return d; |
| 973 | descs[i].pfn = pa / getpagesize(); | 1039 | } |
| 974 | } | 1040 | |
| 975 | return &descs[i]; | 1041 | /* Each device descriptor is followed by some configuration information. |
| 976 | } | 1042 | * The first byte is a "status" byte for the Guest to report what's happening. |
| 977 | } | 1043 | * After that are fields: u8 type, u8 len, [... len bytes...]. |
| 978 | errx(1, "too many devices"); | 1044 | * |
| 1045 | * This routine adds a new field to an existing device's descriptor. It only | ||
| 1046 | * works for the last device, but that's OK because that's how we use it. */ | ||
| 1047 | static void add_desc_field(struct device *dev, u8 type, u8 len, const void *c) | ||
| 1048 | { | ||
| 1049 | /* This is the last descriptor, right? */ | ||
| 1050 | assert(devices.descpage + devices.desc_used | ||
| 1051 | == (u8 *)(dev->desc + 1) + dev->desc->config_len); | ||
| 1052 | |||
| 1053 | /* We only have one page of device descriptions. */ | ||
| 1054 | if (devices.desc_used + 2 + len > getpagesize()) | ||
| 1055 | errx(1, "Too many devices"); | ||
| 1056 | |||
| 1057 | /* Copy in the new config header: type then length. */ | ||
| 1058 | devices.descpage[devices.desc_used++] = type; | ||
| 1059 | devices.descpage[devices.desc_used++] = len; | ||
| 1060 | memcpy(devices.descpage + devices.desc_used, c, len); | ||
| 1061 | devices.desc_used += len; | ||
| 1062 | |||
| 1063 | /* Update the device descriptor length: two byte head then data. */ | ||
| 1064 | dev->desc->config_len += 2 + len; | ||
| 1065 | } | ||
| 1066 | |||
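To show what add_desc_field() builds up, here is a hedged sketch (a hypothetical helper, not in this file) of how the config space after a descriptor would be walked: each entry is a type byte, a length byte, then the payload, and config_len covers them all:

        static void walk_config(struct lguest_device_desc *desc)
        {
                u8 *cfg = (u8 *)(desc + 1), *end = cfg + desc->config_len;

                while (cfg < end) {
                        u8 type = cfg[0], len = cfg[1];

                        if (type == VIRTIO_CONFIG_F_VIRTQUEUE)
                                verbose("virtqueue config field, %u bytes\n", len);
                        /* Skip the two-byte header and the payload. */
                        cfg += 2 + len;
                }
        }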
| 1067 | /* This routine adds a virtqueue to a device. We specify how many descriptors | ||
| 1068 | * the virtqueue is to have. */ | ||
| 1069 | static void add_virtqueue(struct device *dev, unsigned int num_descs, | ||
| 1070 | void (*handle_output)(int fd, struct virtqueue *me)) | ||
| 1071 | { | ||
| 1072 | unsigned int pages; | ||
| 1073 | struct virtqueue **i, *vq = malloc(sizeof(*vq)); | ||
| 1074 | void *p; | ||
| 1075 | |||
| 1076 | /* First we need some pages for this virtqueue. */ | ||
| 1077 | pages = (vring_size(num_descs) + getpagesize() - 1) / getpagesize(); | ||
| 1078 | p = get_pages(pages); | ||
| 1079 | |||
| 1080 | /* Initialize the configuration. */ | ||
| 1081 | vq->config.num = num_descs; | ||
| 1082 | vq->config.irq = devices.next_irq++; | ||
| 1083 | vq->config.pfn = to_guest_phys(p) / getpagesize(); | ||
| 1084 | |||
| 1085 | /* Initialize the vring. */ | ||
| 1086 | vring_init(&vq->vring, num_descs, p); | ||
| 1087 | |||
| 1088 | /* Add the configuration information to this device's descriptor. */ | ||
| 1089 | add_desc_field(dev, VIRTIO_CONFIG_F_VIRTQUEUE, | ||
| 1090 | sizeof(vq->config), &vq->config); | ||
| 1091 | |||
| 1092 | /* Add to tail of list, so dev->vq is first vq, dev->vq->next is | ||
| 1093 | * second. */ | ||
| 1094 | for (i = &dev->vq; *i; i = &(*i)->next); | ||
| 1095 | *i = vq; | ||
| 1096 | |||
| 1097 | /* Link virtqueue back to device. */ | ||
| 1098 | vq->dev = dev; | ||
| 1099 | |||
| 1100 | /* Set up handler. */ | ||
| 1101 | vq->handle_output = handle_output; | ||
| 1102 | if (!handle_output) | ||
| 1103 | vq->vring.used->flags = VRING_USED_F_NO_NOTIFY; | ||
| 979 | } | 1104 | } |
| 980 | 1105 | ||
| 981 | /* This monster routine does all the creation and setup of a new device, | 1106 | /* This routine does all the creation and setup of a new device, including |
| 982 | * including calling new_dev_desc() to allocate the descriptor and device | 1107 | * calling new_dev_desc() to allocate the descriptor and device memory. */ |
| 983 | * memory. */ | 1108 | static struct device *new_device(const char *name, u16 type, int fd, |
| 984 | static struct device *new_device(struct device_list *devices, | 1109 | bool (*handle_input)(int, struct device *)) |
| 985 | u16 type, u16 num_pages, u16 features, | ||
| 986 | int fd, | ||
| 987 | bool (*handle_input)(int, struct device *), | ||
| 988 | unsigned long watch_off, | ||
| 989 | u32 (*handle_output)(int, | ||
| 990 | const struct iovec *, | ||
| 991 | unsigned, | ||
| 992 | struct device *)) | ||
| 993 | { | 1110 | { |
| 994 | struct device *dev = malloc(sizeof(*dev)); | 1111 | struct device *dev = malloc(sizeof(*dev)); |
| 995 | 1112 | ||
| @@ -997,27 +1114,25 @@ static struct device *new_device(struct device_list *devices, | |||
| 997 | * easier, but the user expects the devices to be arranged on the bus | 1114 | * easier, but the user expects the devices to be arranged on the bus |
| 998 | * in command-line order. The first network device on the command line | 1115 | * in command-line order. The first network device on the command line |
| 999 | * is eth0, the first block device /dev/lgba, etc. */ | 1116 | * is eth0, the first block device /dev/lgba, etc. */ |
| 1000 | *devices->lastdev = dev; | 1117 | *devices.lastdev = dev; |
| 1001 | dev->next = NULL; | 1118 | dev->next = NULL; |
| 1002 | devices->lastdev = &dev->next; | 1119 | devices.lastdev = &dev->next; |
| 1003 | 1120 | ||
| 1004 | /* Now we populate the fields one at a time. */ | 1121 | /* Now we populate the fields one at a time. */ |
| 1005 | dev->fd = fd; | 1122 | dev->fd = fd; |
| 1006 | /* If we have an input handler for this file descriptor, then we add it | 1123 | /* If we have an input handler for this file descriptor, then we add it |
| 1007 | * to the device_list's fdset and maxfd. */ | 1124 | * to the device_list's fdset and maxfd. */ |
| 1008 | if (handle_input) | 1125 | if (handle_input) |
| 1009 | set_fd(dev->fd, devices); | 1126 | add_device_fd(dev->fd); |
| 1010 | dev->desc = new_dev_desc(devices->descs, type, features, num_pages); | 1127 | dev->desc = new_dev_desc(type); |
| 1011 | dev->mem = from_guest_phys(dev->desc->pfn * getpagesize()); | ||
| 1012 | dev->handle_input = handle_input; | 1128 | dev->handle_input = handle_input; |
| 1013 | dev->watch_key = to_guest_phys(dev->mem) + watch_off; | 1129 | dev->name = name; |
| 1014 | dev->handle_output = handle_output; | ||
| 1015 | return dev; | 1130 | return dev; |
| 1016 | } | 1131 | } |
| 1017 | 1132 | ||
| 1018 | /* Our first setup routine is the console. It's a fairly simple device, but | 1133 | /* Our first setup routine is the console. It's a fairly simple device, but |
| 1019 | * UNIX tty handling makes it uglier than it could be. */ | 1134 | * UNIX tty handling makes it uglier than it could be. */ |
| 1020 | static void setup_console(struct device_list *devices) | 1135 | static void setup_console(void) |
| 1021 | { | 1136 | { |
| 1022 | struct device *dev; | 1137 | struct device *dev; |
| 1023 | 1138 | ||
| @@ -1033,127 +1148,38 @@ static void setup_console(struct device_list *devices) | |||
| 1033 | atexit(restore_term); | 1148 | atexit(restore_term); |
| 1034 | } | 1149 | } |
| 1035 | 1150 | ||
| 1036 | /* We don't currently require any memory for the console, so we ask for | 1151 | dev = new_device("console", VIRTIO_ID_CONSOLE, |
| 1037 | * 0 pages. */ | 1152 | STDIN_FILENO, handle_console_input); |
| 1038 | dev = new_device(devices, LGUEST_DEVICE_T_CONSOLE, 0, 0, | ||
| 1039 | STDIN_FILENO, handle_console_input, | ||
| 1040 | LGUEST_CONSOLE_DMA_KEY, handle_console_output); | ||
| 1041 | /* We store the console state in dev->priv, and initialize it. */ | 1153 | /* We store the console state in dev->priv, and initialize it. */ |
| 1042 | dev->priv = malloc(sizeof(struct console_abort)); | 1154 | dev->priv = malloc(sizeof(struct console_abort)); |
| 1043 | ((struct console_abort *)dev->priv)->count = 0; | 1155 | ((struct console_abort *)dev->priv)->count = 0; |
| 1044 | verbose("device %p: console\n", | ||
| 1045 | (void *)(dev->desc->pfn * getpagesize())); | ||
| 1046 | } | ||
| 1047 | 1156 | ||
| 1048 | /* Setting up a block file is also fairly straightforward. */ | 1157 | /* The console needs two virtqueues: the input then the output. We |
| 1049 | static void setup_block_file(const char *filename, struct device_list *devices) | 1158 | * don't care when they refill the input queue, since we don't hold |
| 1050 | { | 1159 | * data waiting for them. That's why the input queue's callback is |
| 1051 | int fd; | 1160 | * NULL. */ |
| 1052 | struct device *dev; | 1161 | add_virtqueue(dev, VIRTQUEUE_NUM, NULL); |
| 1053 | off64_t *device_len; | 1162 | add_virtqueue(dev, VIRTQUEUE_NUM, handle_console_output); |
| 1054 | struct lguest_block_page *p; | 1163 | |
| 1055 | 1164 | verbose("device %u: console\n", devices.device_num++); | |
| 1056 | /* We open with O_LARGEFILE because otherwise we get stuck at 2G. We | ||
| 1057 | * open with O_DIRECT because otherwise our benchmarks go much too | ||
| 1058 | * fast. */ | ||
| 1059 | fd = open_or_die(filename, O_RDWR|O_LARGEFILE|O_DIRECT); | ||
| 1060 | |||
| 1061 | /* We want one page, and have no input handler (the block file never | ||
| 1062 | * has anything interesting to say to us). Our timing will be quite | ||
| 1063 | * random, so it should be a reasonable randomness source. */ | ||
| 1064 | dev = new_device(devices, LGUEST_DEVICE_T_BLOCK, 1, | ||
| 1065 | LGUEST_DEVICE_F_RANDOMNESS, | ||
| 1066 | fd, NULL, 0, handle_block_output); | ||
| 1067 | |||
| 1068 | /* We store the device size in the private area */ | ||
| 1069 | device_len = dev->priv = malloc(sizeof(*device_len)); | ||
| 1070 | /* This is the safe way of establishing the size of our device: it | ||
| 1071 | * might be a normal file or an actual block device like /dev/hdb. */ | ||
| 1072 | *device_len = lseek64(fd, 0, SEEK_END); | ||
| 1073 | |||
| 1074 | /* The device memory is a "struct lguest_block_page". It's zeroed | ||
| 1075 | * already, we just need to put in the device size. Block devices | ||
| 1076 | * think in sectors (ie. 512 byte chunks), so we translate here. */ | ||
| 1077 | p = dev->mem; | ||
| 1078 | p->num_sectors = *device_len/512; | ||
| 1079 | verbose("device %p: block %i sectors\n", | ||
| 1080 | (void *)(dev->desc->pfn * getpagesize()), p->num_sectors); | ||
| 1081 | } | 1165 | } |
| 1166 | /*:*/ | ||
| 1082 | 1167 | ||
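Putting the helpers together, here is a purely hypothetical sketch of how another trivial device could be assembled with new_device(), add_virtqueue() and the virtqueue routines above (no such device exists in this file; the name and type number are invented):

        static bool handle_rand_input(int fd, struct device *dev)
        {
                unsigned int head, in_num, out_num;
                int len;
                struct iovec iov[dev->vq->vring.num];

                /* Find a buffer the Guest has made available, or discard. */
                head = get_vq_desc(dev->vq, iov, &out_num, &in_num);
                if (head == dev->vq->vring.num)
                        discard_iovec(iov, &in_num);
                else if (out_num)
                        errx(1, "Output buffers in rand queue?");

                len = readv(dev->fd, iov, in_num);
                if (len <= 0)
                        err(1, "reading randomness");

                /* Only tell the Guest about data which actually reached it. */
                if (head != dev->vq->vring.num)
                        add_used_and_trigger(fd, dev->vq, head, len);
                return true;
        }

        static void setup_rand(void)
        {
                struct device *dev;

                /* 99 is an invented device type, just for the example. */
                dev = new_device("rand", 99, open_or_die("/dev/urandom", O_RDONLY),
                                 handle_rand_input);
                add_virtqueue(dev, VIRTQUEUE_NUM, NULL);
                verbose("device %u: rand\n", devices.device_num++);
        }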
| 1083 | /* | 1168 | /*M:010 Inter-guest networking is an interesting area. Simplest is to have a |
| 1084 | * Network Devices. | 1169 | * --sharenet=<name> option which opens or creates a named pipe. This can be |
| 1170 | * used to send packets to another guest in a 1:1 manner. | ||
| 1085 | * | 1171 | * |
| 1086 | * Setting up network devices is quite a pain, because we have three types. | 1172 | * More sophisticated is to use one of the tools developed for projects like UML |
| 1087 | * First, we have the inter-Guest network. This is a file which is mapped into | 1173 | * to do networking. |
| 1088 | * the address space of the Guests who are on the network. Because it is a | ||
| 1089 | * shared mapping, the same page underlies all the devices, and they can send | ||
| 1090 | * DMA to each other. | ||
| 1091 | * | 1174 | * |
| 1092 | * Remember from our network driver, the Guest is told what slot in the page it | 1175 | * Faster is to do virtio bonding in kernel. Doing this 1:1 would be |
| 1093 | * is to use. We use exclusive fnctl locks to reserve a slot. If another | 1176 | * completely generic ("here's my vring, attach to your vring") and would work |
| 1094 | * Guest is using a slot, the lock will fail and we try another. Because fnctl | 1177 | * for any traffic. Of course, namespace and permissions issues need to be |
| 1095 | * locks are cleaned up automatically when we die, this cleverly means that our | 1178 | * dealt with. A more sophisticated "multi-channel" virtio_net.c could hide |
| 1096 | * reservation on the slot will vanish if we crash. */ | 1179 | * multiple inter-guest channels behind one interface, although it would |
| 1097 | static unsigned int find_slot(int netfd, const char *filename) | 1180 | * require some manner of hotplugging new virtio channels. |
| 1098 | { | 1181 | * |
| 1099 | struct flock fl; | 1182 | * Finally, we could implement a virtio network switch in the kernel. :*/ |
| 1100 | |||
| 1101 | fl.l_type = F_WRLCK; | ||
| 1102 | fl.l_whence = SEEK_SET; | ||
| 1103 | fl.l_len = 1; | ||
| 1104 | /* Try a 1 byte lock in each possible position number */ | ||
| 1105 | for (fl.l_start = 0; | ||
| 1106 | fl.l_start < getpagesize()/sizeof(struct lguest_net); | ||
| 1107 | fl.l_start++) { | ||
| 1108 | /* If we succeed, return the slot number. */ | ||
| 1109 | if (fcntl(netfd, F_SETLK, &fl) == 0) | ||
| 1110 | return fl.l_start; | ||
| 1111 | } | ||
| 1112 | errx(1, "No free slots in network file %s", filename); | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | /* This function sets up the network file */ | ||
| 1116 | static void setup_net_file(const char *filename, | ||
| 1117 | struct device_list *devices) | ||
| 1118 | { | ||
| 1119 | int netfd; | ||
| 1120 | struct device *dev; | ||
| 1121 | |||
| 1122 | /* We don't use open_or_die() here: for friendliness we create the file | ||
| 1123 | * if it doesn't already exist. */ | ||
| 1124 | netfd = open(filename, O_RDWR, 0); | ||
| 1125 | if (netfd < 0) { | ||
| 1126 | if (errno == ENOENT) { | ||
| 1127 | netfd = open(filename, O_RDWR|O_CREAT, 0600); | ||
| 1128 | if (netfd >= 0) { | ||
| 1129 | /* If we succeeded, initialize the file with a | ||
| 1130 | * blank page. */ | ||
| 1131 | char page[getpagesize()]; | ||
| 1132 | memset(page, 0, sizeof(page)); | ||
| 1133 | write(netfd, page, sizeof(page)); | ||
| 1134 | } | ||
| 1135 | } | ||
| 1136 | if (netfd < 0) | ||
| 1137 | err(1, "cannot open net file '%s'", filename); | ||
| 1138 | } | ||
| 1139 | |||
| 1140 | /* We need 1 page, and the features indicate the slot to use and that | ||
| 1141 | * no checksum is needed. We never touch this device again; it's | ||
| 1142 | * between the Guests on the network, so we don't register input or | ||
| 1143 | * output handlers. */ | ||
| 1144 | dev = new_device(devices, LGUEST_DEVICE_T_NET, 1, | ||
| 1145 | find_slot(netfd, filename)|LGUEST_NET_F_NOCSUM, | ||
| 1146 | -1, NULL, 0, NULL); | ||
| 1147 | |||
| 1148 | /* Map the shared file. */ | ||
| 1149 | if (mmap(dev->mem, getpagesize(), PROT_READ|PROT_WRITE, | ||
| 1150 | MAP_FIXED|MAP_SHARED, netfd, 0) != dev->mem) | ||
| 1151 | err(1, "could not mmap '%s'", filename); | ||
| 1152 | verbose("device %p: shared net %s, peer %i\n", | ||
| 1153 | (void *)(dev->desc->pfn * getpagesize()), filename, | ||
| 1154 | dev->desc->features & ~LGUEST_NET_F_NOCSUM); | ||
| 1155 | } | ||
| 1156 | /*:*/ | ||
| 1157 | 1183 | ||
| 1158 | static u32 str2ip(const char *ipaddr) | 1184 | static u32 str2ip(const char *ipaddr) |
| 1159 | { | 1185 | { |
| @@ -1188,7 +1214,7 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name) | |||
| 1188 | 1214 | ||
| 1189 | /* This sets up the Host end of the network device with an IP address, brings | 1215 | /* This sets up the Host end of the network device with an IP address, brings |
| 1190 | * it up so packets will flow, then copies the MAC address into the hwaddr | 1216 | * it up so packets will flow, then copies the MAC address into the hwaddr |
| 1191 | * pointer (in practice, the Host's slot in the network device's memory). */ | 1217 | * pointer. */ |
| 1192 | static void configure_device(int fd, const char *devname, u32 ipaddr, | 1218 | static void configure_device(int fd, const char *devname, u32 ipaddr, |
| 1193 | unsigned char hwaddr[6]) | 1219 | unsigned char hwaddr[6]) |
| 1194 | { | 1220 | { |
| @@ -1214,18 +1240,18 @@ static void configure_device(int fd, const char *devname, u32 ipaddr, | |||
| 1214 | memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6); | 1240 | memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6); |
| 1215 | } | 1241 | } |
| 1216 | 1242 | ||
| 1217 | /*L:195 The other kind of network is a Host<->Guest network. This can either | 1243 | /*L:195 Our network is a Host<->Guest network. This can either use bridging or |
| 1218 | * use bridging or routing, but the principle is the same: it uses the "tun" | 1244 | * routing, but the principle is the same: it uses the "tun" device to inject |
| 1219 | * device to inject packets into the Host as if they came in from a normal | 1245 | * packets into the Host as if they came in from a normal network card. We |
| 1220 | * network card. We just shunt packets between the Guest and the tun | 1246 | * just shunt packets between the Guest and the tun device. */ |
| 1221 | * device. */ | 1247 | static void setup_tun_net(const char *arg) |
| 1222 | static void setup_tun_net(const char *arg, struct device_list *devices) | ||
| 1223 | { | 1248 | { |
| 1224 | struct device *dev; | 1249 | struct device *dev; |
| 1225 | struct ifreq ifr; | 1250 | struct ifreq ifr; |
| 1226 | int netfd, ipfd; | 1251 | int netfd, ipfd; |
| 1227 | u32 ip; | 1252 | u32 ip; |
| 1228 | const char *br_name = NULL; | 1253 | const char *br_name = NULL; |
| 1254 | u8 hwaddr[6]; | ||
| 1229 | 1255 | ||
| 1230 | /* We open the /dev/net/tun device and tell it we want a tap device. A | 1256 | /* We open the /dev/net/tun device and tell it we want a tap device. A |
| 1231 | * tap device is like a tun device, only somehow different. To tell | 1257 | * tap device is like a tun device, only somehow different. To tell |
| @@ -1241,21 +1267,12 @@ static void setup_tun_net(const char *arg, struct device_list *devices) | |||
| 1241 | * device: trust us! */ | 1267 | * device: trust us! */ |
| 1242 | ioctl(netfd, TUNSETNOCSUM, 1); | 1268 | ioctl(netfd, TUNSETNOCSUM, 1); |
| 1243 | 1269 | ||
| 1244 | /* We create the net device with 1 page, using the features field of | 1270 | /* First we create a new network device. */ |
| 1245 | * the descriptor to tell the Guest it is in slot 1 (NET_PEERNUM), and | 1271 | dev = new_device("net", VIRTIO_ID_NET, netfd, handle_tun_input); |
| 1246 | * that the device has fairly random timing. We do *not* specify | ||
| 1247 | * LGUEST_NET_F_NOCSUM: these packets can reach the real world. | ||
| 1248 | * | ||
| 1249 | * We will put our MAC address is slot 0 for the Guest to see, so | ||
| 1250 | * it will send packets to us using the key "peer_offset(0)": */ | ||
| 1251 | dev = new_device(devices, LGUEST_DEVICE_T_NET, 1, | ||
| 1252 | NET_PEERNUM|LGUEST_DEVICE_F_RANDOMNESS, netfd, | ||
| 1253 | handle_tun_input, peer_offset(0), handle_tun_output); | ||
| 1254 | 1272 | ||
| 1255 | /* We keep a flag which says whether we've seen packets come out from | 1273 | /* Network devices need a receive and a send queue. */ |
| 1256 | * this network device. */ | 1274 | add_virtqueue(dev, VIRTQUEUE_NUM, NULL); |
| 1257 | dev->priv = malloc(sizeof(bool)); | 1275 | add_virtqueue(dev, VIRTQUEUE_NUM, handle_net_output); |
| 1258 | *(bool *)dev->priv = false; | ||
| 1259 | 1276 | ||
| 1260 | /* We need a socket to perform the magic network ioctls to bring up the | 1277 | /* We need a socket to perform the magic network ioctls to bring up the |
| 1261 | * tap interface, connect to the bridge etc. Any socket will do! */ | 1278 | * tap interface, connect to the bridge etc. Any socket will do! */ |
| @@ -1271,44 +1288,251 @@ static void setup_tun_net(const char *arg, struct device_list *devices) | |||
| 1271 | } else /* It is an IP address to set up the device with */ | 1288 | } else /* It is an IP address to set up the device with */ |
| 1272 | ip = str2ip(arg); | 1289 | ip = str2ip(arg); |
| 1273 | 1290 | ||
| 1274 | /* We are peer 0, ie. first slot, so we hand dev->mem to this routine | 1291 | /* Set up the tun device, and get the mac address for the interface. */ |
| 1275 | * to write the MAC address at the start of the device memory. */ | 1292 | configure_device(ipfd, ifr.ifr_name, ip, hwaddr); |
| 1276 | configure_device(ipfd, ifr.ifr_name, ip, dev->mem); | ||
| 1277 | 1293 | ||
| 1278 | /* Set "promisc" bit: we want every single packet if we're going to | 1294 | /* Tell Guest what MAC address to use. */ |
| 1279 | * bridge to other machines (and otherwise it doesn't matter). */ | 1295 | add_desc_field(dev, VIRTIO_CONFIG_NET_MAC_F, sizeof(hwaddr), hwaddr); |
| 1280 | *((u8 *)dev->mem) |= 0x1; | ||
| 1281 | 1296 | ||
| 1297 | /* We don't need the socket any more; setup is done. */ | ||
| 1282 | close(ipfd); | 1298 | close(ipfd); |
| 1283 | 1299 | ||
| 1284 | verbose("device %p: tun net %u.%u.%u.%u\n", | 1300 | verbose("device %u: tun net %u.%u.%u.%u\n", |
| 1285 | (void *)(dev->desc->pfn * getpagesize()), | 1301 | devices.device_num++, |
| 1286 | (u8)(ip>>24), (u8)(ip>>16), (u8)(ip>>8), (u8)ip); | 1302 | (u8)(ip>>24),(u8)(ip>>16),(u8)(ip>>8),(u8)ip); |
| 1287 | if (br_name) | 1303 | if (br_name) |
| 1288 | verbose("attached to bridge: %s\n", br_name); | 1304 | verbose("attached to bridge: %s\n", br_name); |
| 1289 | } | 1305 | } |
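The hunk above elides the part of setup_tun_net() that actually opens /dev/net/tun and asks for a tap interface. For orientation only, a typical tap setup looks roughly like the sketch below; open_tap() and the "tap%d" name template are illustrative, not code from this patch.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <err.h>

/* Illustrative sketch: open the tun control device and ask the kernel for a
 * tap interface.  IFF_TAP means we exchange raw ethernet frames; IFF_NO_PI
 * suppresses the extra packet-information header. */
static int open_tap(char name[IFNAMSIZ])
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		err(1, "opening /dev/net/tun");
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strcpy(ifr.ifr_name, "tap%d");
	if (ioctl(fd, TUNSETIFF, &ifr) != 0)
		err(1, "configuring tap device");
	/* The kernel fills in the real name, eg. "tap0". */
	strcpy(name, ifr.ifr_name);
	return fd;
}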
| 1306 | |||
| 1307 | |||
| 1308 | /* | ||
| 1309 | * Block device. | ||
| 1310 | * | ||
| 1311 | * Serving a block device is really easy: the Guest asks for a block number and | ||
| 1312 | * we read or write that position in the file. | ||
| 1313 | * | ||
| 1314 | * Unfortunately, this is amazingly slow: the Guest waits until the read is | ||
| 1315 | * finished before running anything else, even if it could be doing useful | ||
| 1316 | * work. We could use async I/O, except it's reputed to suck so hard that | ||
| 1317 | * characters actually go missing from your code when you try to use it. | ||
| 1318 | * | ||
| 1319 | * So we farm the I/O out to a thread, and communicate with it via a pipe. */ | ||
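Distilled to its skeleton, the mechanism the next few functions implement is a pipe in each direction plus a worker that shares our address space. The names here (worker, start_worker, the pipe variables) are illustrative, not the ones this file uses:

#define _GNU_SOURCE
#include <err.h>
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>

static int work_pipe[2];	/* main -> worker: "there is work queued" */
static int done_pipe[2];	/* worker -> main: "one request finished" */

/* The worker just blocks on the pipe; a zero-length read means every write
 * end is closed, ie. the main process has exited. */
static int worker(void *unused)
{
	char c;
	while (read(work_pipe[0], &c, 1) == 1) {
		/* ... do the slow, blocking disk I/O here ... */
		write(done_pipe[1], &c, 1);
	}
	return 0;
}

static void start_worker(void)
{
	char *stack = malloc(32768);

	pipe(work_pipe);
	pipe(done_pipe);
	/* CLONE_VM shares our memory, like a thread.  Stacks grow down on
	 * x86, so we hand clone() the top of the allocation. */
	if (clone(worker, stack + 32768, CLONE_VM, NULL) == -1)
		err(1, "creating worker");
}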
| 1320 | |||
| 1321 | /* This hangs off device->priv, with the data. */ | ||
| 1322 | struct vblk_info | ||
| 1323 | { | ||
| 1324 | /* The size of the file. */ | ||
| 1325 | off64_t len; | ||
| 1326 | |||
| 1327 | /* The file descriptor for the file. */ | ||
| 1328 | int fd; | ||
| 1329 | |||
| 1330 | /* IO thread listens on this file descriptor [0]. */ | ||
| 1331 | int workpipe[2]; | ||
| 1332 | |||
| 1333 | /* IO thread writes to this file descriptor to mark it done, then | ||
| 1334 | * Launcher triggers interrupt to Guest. */ | ||
| 1335 | int done_fd; | ||
| 1336 | }; | ||
| 1337 | |||
| 1338 | /* This is the core of the I/O thread. It returns true if it did something. */ | ||
| 1339 | static bool service_io(struct device *dev) | ||
| 1340 | { | ||
| 1341 | struct vblk_info *vblk = dev->priv; | ||
| 1342 | unsigned int head, out_num, in_num, wlen; | ||
| 1343 | int ret; | ||
| 1344 | struct virtio_blk_inhdr *in; | ||
| 1345 | struct virtio_blk_outhdr *out; | ||
| 1346 | struct iovec iov[dev->vq->vring.num]; | ||
| 1347 | off64_t off; | ||
| 1348 | |||
| 1349 | head = get_vq_desc(dev->vq, iov, &out_num, &in_num); | ||
| 1350 | if (head == dev->vq->vring.num) | ||
| 1351 | return false; | ||
| 1352 | |||
| 1353 | if (out_num == 0 || in_num == 0) | ||
| 1354 | errx(1, "Bad virtblk cmd %u out=%u in=%u", | ||
| 1355 | head, out_num, in_num); | ||
| 1356 | |||
| 1357 | out = convert(&iov[0], struct virtio_blk_outhdr); | ||
| 1358 | in = convert(&iov[out_num+in_num-1], struct virtio_blk_inhdr); | ||
| 1359 | off = out->sector * 512; | ||
| 1360 | |||
| 1361 | /* This is how we implement barriers. Pretty poor, no? */ | ||
| 1362 | if (out->type & VIRTIO_BLK_T_BARRIER) | ||
| 1363 | fdatasync(vblk->fd); | ||
| 1364 | |||
| 1365 | if (out->type & VIRTIO_BLK_T_SCSI_CMD) { | ||
| 1366 | fprintf(stderr, "Scsi commands unsupported\n"); | ||
| 1367 | in->status = VIRTIO_BLK_S_UNSUPP; | ||
| 1368 | wlen = sizeof(*in); | ||
| 1369 | } else if (out->type & VIRTIO_BLK_T_OUT) { | ||
| 1370 | /* Write */ | ||
| 1371 | |||
| 1372 | /* Move to the right location in the block file. This can fail | ||
| 1373 | * if they try to write past end. */ | ||
| 1374 | if (lseek64(vblk->fd, off, SEEK_SET) != off) | ||
| 1375 | err(1, "Bad seek to sector %llu", out->sector); | ||
| 1376 | |||
| 1377 | ret = writev(vblk->fd, iov+1, out_num-1); | ||
| 1378 | verbose("WRITE to sector %llu: %i\n", out->sector, ret); | ||
| 1379 | |||
| 1380 | /* Grr... Now we know how long the descriptor they sent was, we | ||
| 1381 | * make sure they didn't try to write over the end of the block | ||
| 1382 | * file (possibly extending it). */ | ||
| 1383 | if (ret > 0 && off + ret > vblk->len) { | ||
| 1384 | /* Trim it back to the correct length */ | ||
| 1385 | ftruncate64(vblk->fd, vblk->len); | ||
| 1386 | /* Die, bad Guest, die. */ | ||
| 1387 | errx(1, "Write past end %llu+%u", off, ret); | ||
| 1388 | } | ||
| 1389 | wlen = sizeof(*in); | ||
| 1390 | in->status = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); | ||
| 1391 | } else { | ||
| 1392 | /* Read */ | ||
| 1393 | |||
| 1394 | /* Move to the right location in the block file. This can fail | ||
| 1395 | * if they try to read past end. */ | ||
| 1396 | if (lseek64(vblk->fd, off, SEEK_SET) != off) | ||
| 1397 | err(1, "Bad seek to sector %llu", out->sector); | ||
| 1398 | |||
| 1399 | ret = readv(vblk->fd, iov+1, in_num-1); | ||
| 1400 | verbose("READ from sector %llu: %i\n", out->sector, ret); | ||
| 1401 | if (ret >= 0) { | ||
| 1402 | wlen = sizeof(*in) + ret; | ||
| 1403 | in->status = VIRTIO_BLK_S_OK; | ||
| 1404 | } else { | ||
| 1405 | wlen = sizeof(*in); | ||
| 1406 | in->status = VIRTIO_BLK_S_IOERR; | ||
| 1407 | } | ||
| 1408 | } | ||
| 1409 | |||
| 1410 | /* We can't trigger an IRQ, because we're not the Launcher. It does | ||
| 1411 | * that when we tell it we're done. */ | ||
| 1412 | add_used(dev->vq, head, wlen); | ||
| 1413 | return true; | ||
| 1414 | } | ||
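The convert() macro used above is defined earlier in the file: it hands back an iovec element's base pointer as the requested struct type, after sanity-checking the element. A minimal stand-in (not necessarily what the file actually defines) looks like this:

#include <err.h>
#include <sys/uio.h>

/* Hypothetical stand-in for convert(): insist the descriptor is exactly the
 * size of the requested type, then cast it.  Dying loudly on a malformed
 * descriptor matches the launcher's general attitude towards misbehaving
 * Guests. */
#define convert(iov, type) ((type *)_check_len((iov), sizeof(type)))

static void *_check_len(struct iovec *iov, size_t size)
{
	if (iov->iov_len != size)
		errx(1, "Bad iovec length %zu, wanted %zu", iov->iov_len, size);
	return iov->iov_base;
}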
| 1415 | |||
| 1416 | /* This is the thread which actually services the I/O. */ | ||
| 1417 | static int io_thread(void *_dev) | ||
| 1418 | { | ||
| 1419 | struct device *dev = _dev; | ||
| 1420 | struct vblk_info *vblk = dev->priv; | ||
| 1421 | char c; | ||
| 1422 | |||
| 1423 | /* Close other side of workpipe so we get 0 read when main dies. */ | ||
| 1424 | close(vblk->workpipe[1]); | ||
| 1425 | /* Close the other side of the done_fd pipe. */ | ||
| 1426 | close(dev->fd); | ||
| 1427 | |||
| 1428 | /* When this read fails, it means Launcher died, so we follow. */ | ||
| 1429 | while (read(vblk->workpipe[0], &c, 1) == 1) { | ||
| 1430 | /* We acknowledge each request immediately, to reduce latency, | ||
| 1431 | * rather than waiting until we've done them all. I haven't | ||
| 1432 | * measured to see if it makes any difference. */ | ||
| 1433 | while (service_io(dev)) | ||
| 1434 | write(vblk->done_fd, &c, 1); | ||
| 1435 | } | ||
| 1436 | return 0; | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | /* When the thread says some I/O is done, we interrupt the Guest. */ | ||
| 1440 | static bool handle_io_finish(int fd, struct device *dev) | ||
| 1441 | { | ||
| 1442 | char c; | ||
| 1443 | |||
| 1444 | /* If the child died, presumably it printed a message. */ | ||
| 1445 | if (read(dev->fd, &c, 1) != 1) | ||
| 1446 | exit(1); | ||
| 1447 | |||
| 1448 | /* It did some work, so trigger the irq. */ | ||
| 1449 | trigger_irq(fd, dev->vq); | ||
| 1450 | return true; | ||
| 1451 | } | ||
| 1452 | |||
| 1453 | /* When the Guest submits some I/O, we wake the I/O thread. */ | ||
| 1454 | static void handle_virtblk_output(int fd, struct virtqueue *vq) | ||
| 1455 | { | ||
| 1456 | struct vblk_info *vblk = vq->dev->priv; | ||
| 1457 | char c = 0; | ||
| 1458 | |||
| 1459 | /* Wake up I/O thread and tell it to go to work! */ | ||
| 1460 | if (write(vblk->workpipe[1], &c, 1) != 1) | ||
| 1461 | /* Presumably it indicated why it died. */ | ||
| 1462 | exit(1); | ||
| 1463 | } | ||
| 1464 | |||
| 1465 | /* This creates a virtual block device. */ | ||
| 1466 | static void setup_block_file(const char *filename) | ||
| 1467 | { | ||
| 1468 | int p[2]; | ||
| 1469 | struct device *dev; | ||
| 1470 | struct vblk_info *vblk; | ||
| 1471 | void *stack; | ||
| 1472 | u64 cap; | ||
| 1473 | unsigned int val; | ||
| 1474 | |||
| 1475 | /* This is the pipe the I/O thread will use to tell us I/O is done. */ | ||
| 1476 | pipe(p); | ||
| 1477 | |||
| 1478 | /* The device responds to return from I/O thread. */ | ||
| 1479 | dev = new_device("block", VIRTIO_ID_BLOCK, p[0], handle_io_finish); | ||
| 1480 | |||
| 1481 | /* The device has a virtqueue. */ | ||
| 1482 | add_virtqueue(dev, VIRTQUEUE_NUM, handle_virtblk_output); | ||
| 1483 | |||
| 1484 | /* Allocate the room for our own bookkeeping */ | ||
| 1485 | vblk = dev->priv = malloc(sizeof(*vblk)); | ||
| 1486 | |||
| 1487 | /* First we open the file and store the length. */ | ||
| 1488 | vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE); | ||
| 1489 | vblk->len = lseek64(vblk->fd, 0, SEEK_END); | ||
| 1490 | |||
| 1491 | /* Tell Guest how many sectors this device has. */ | ||
| 1492 | cap = cpu_to_le64(vblk->len / 512); | ||
| 1493 | add_desc_field(dev, VIRTIO_CONFIG_BLK_F_CAPACITY, sizeof(cap), &cap); | ||
| 1494 | |||
| 1495 | /* Tell Guest not to put in too many descriptors at once: two are used | ||
| 1496 | * for the in and out elements. */ | ||
| 1497 | val = cpu_to_le32(VIRTQUEUE_NUM - 2); | ||
| 1498 | add_desc_field(dev, VIRTIO_CONFIG_BLK_F_SEG_MAX, sizeof(val), &val); | ||
| 1499 | |||
| 1500 | /* The I/O thread writes to this end of the pipe when done. */ | ||
| 1501 | vblk->done_fd = p[1]; | ||
| 1502 | |||
| 1503 | /* This is how we tell the I/O thread about more work. */ | ||
| 1504 | pipe(vblk->workpipe); | ||
| 1505 | |||
| 1506 | /* Create stack for thread and run it */ | ||
| 1507 | stack = malloc(32768); | ||
| 1508 | if (clone(io_thread, stack + 32768, CLONE_VM, dev) == -1) | ||
| 1509 | err(1, "Creating clone"); | ||
| 1510 | |||
| 1511 | /* We don't need to keep the I/O thread's end of the pipes open. */ | ||
| 1512 | close(vblk->done_fd); | ||
| 1513 | close(vblk->workpipe[0]); | ||
| 1514 | |||
| 1515 | verbose("device %u: virtblock %llu sectors\n", | ||
| 1516 | devices.device_num, cap); | ||
| 1517 | } | ||
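Two config fields is all the Guest needs here. With VIRTQUEUE_NUM at 127 (one page worth of ring), the SEG_MAX value announced above works out to 125 data segments per request, leaving room for the out header and the status byte. Capacity is counted in 512-byte sectors, so, for example, a 64 MB backing file would be advertised as 131072 sectors.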
| 1290 | /* That's the end of device setup. */ | 1518 | /* That's the end of device setup. */ |
| 1291 | 1519 | ||
| 1292 | /*L:220 Finally we reach the core of the Launcher, which runs the Guest, serves | 1520 | /*L:220 Finally we reach the core of the Launcher, which runs the Guest, serves |
| 1293 | * its input and output, and finally, lays it to rest. */ | 1521 | * its input and output, and finally, lays it to rest. */ |
| 1294 | static void __attribute__((noreturn)) | 1522 | static void __attribute__((noreturn)) run_guest(int lguest_fd) |
| 1295 | run_guest(int lguest_fd, struct device_list *device_list) | ||
| 1296 | { | 1523 | { |
| 1297 | for (;;) { | 1524 | for (;;) { |
| 1298 | unsigned long args[] = { LHREQ_BREAK, 0 }; | 1525 | unsigned long args[] = { LHREQ_BREAK, 0 }; |
| 1299 | unsigned long arr[2]; | 1526 | unsigned long notify_addr; |
| 1300 | int readval; | 1527 | int readval; |
| 1301 | 1528 | ||
| 1302 | /* We read from the /dev/lguest device to run the Guest. */ | 1529 | /* We read from the /dev/lguest device to run the Guest. */ |
| 1303 | readval = read(lguest_fd, arr, sizeof(arr)); | 1530 | readval = read(lguest_fd, ¬ify_addr, sizeof(notify_addr)); |
| 1304 | 1531 | ||
| 1305 | /* The read can only really return sizeof(arr) (the Guest did a | 1532 | /* One unsigned long means the Guest did HCALL_NOTIFY */ |
| 1306 | * SEND_DMA to us), or an error. */ | 1533 | if (readval == sizeof(notify_addr)) { |
| 1307 | 1534 | verbose("Notify on address %#lx\n", notify_addr); | |
| 1308 | /* For a successful read, arr[0] is the address of the "struct | 1535 | handle_output(lguest_fd, notify_addr); |
| 1309 | * lguest_dma", and arr[1] is the key the Guest sent to. */ | ||
| 1310 | if (readval == sizeof(arr)) { | ||
| 1311 | handle_output(lguest_fd, arr[0], arr[1], device_list); | ||
| 1312 | continue; | 1536 | continue; |
| 1313 | /* ENOENT means the Guest died. Reading tells us why. */ | 1537 | /* ENOENT means the Guest died. Reading tells us why. */ |
| 1314 | } else if (errno == ENOENT) { | 1538 | } else if (errno == ENOENT) { |
| @@ -1322,7 +1546,7 @@ run_guest(int lguest_fd, struct device_list *device_list) | |||
| 1322 | 1546 | ||
| 1323 | /* Service input, then unset the BREAK which releases | 1547 | /* Service input, then unset the BREAK which releases |
| 1324 | * the Waker. */ | 1548 | * the Waker. */ |
| 1325 | handle_input(lguest_fd, device_list); | 1549 | handle_input(lguest_fd); |
| 1326 | if (write(lguest_fd, args, sizeof(args)) < 0) | 1550 | if (write(lguest_fd, args, sizeof(args)) < 0) |
| 1327 | err(1, "Resetting break"); | 1551 | err(1, "Resetting break"); |
| 1328 | } | 1552 | } |
| @@ -1336,7 +1560,6 @@ run_guest(int lguest_fd, struct device_list *device_list) | |||
| 1336 | 1560 | ||
| 1337 | static struct option opts[] = { | 1561 | static struct option opts[] = { |
| 1338 | { "verbose", 0, NULL, 'v' }, | 1562 | { "verbose", 0, NULL, 'v' }, |
| 1339 | { "sharenet", 1, NULL, 's' }, | ||
| 1340 | { "tunnet", 1, NULL, 't' }, | 1563 | { "tunnet", 1, NULL, 't' }, |
| 1341 | { "block", 1, NULL, 'b' }, | 1564 | { "block", 1, NULL, 'b' }, |
| 1342 | { "initrd", 1, NULL, 'i' }, | 1565 | { "initrd", 1, NULL, 'i' }, |
| @@ -1345,7 +1568,7 @@ static struct option opts[] = { | |||
| 1345 | static void usage(void) | 1568 | static void usage(void) |
| 1346 | { | 1569 | { |
| 1347 | errx(1, "Usage: lguest [--verbose] " | 1570 | errx(1, "Usage: lguest [--verbose] " |
| 1348 | "[--sharenet=<filename>|--tunnet=(<ipaddr>|bridge:<bridgename>)\n" | 1571 | "[--tunnet=(<ipaddr>|bridge:<bridgename>)\n" |
| 1349 | "|--block=<filename>|--initrd=<filename>]...\n" | 1572 | "|--block=<filename>|--initrd=<filename>]...\n" |
| 1350 | "<mem-in-mb> vmlinux [args...]"); | 1573 | "<mem-in-mb> vmlinux [args...]"); |
| 1351 | } | 1574 | } |
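Putting the pieces together, an invocation of the rebuilt launcher looks something like "lguest --tunnet=192.168.19.1 --block=rootfile 64 vmlinux root=/dev/vda" (the address and file names are only illustrative): 64 MB of Guest memory, a tap network with the Host end at 192.168.19.1, and a virtio block device backed by rootfile, which the Guest sees as /dev/vda.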
| @@ -1358,8 +1581,6 @@ int main(int argc, char *argv[]) | |||
| 1358 | unsigned long mem = 0, pgdir, start, initrd_size = 0; | 1581 | unsigned long mem = 0, pgdir, start, initrd_size = 0; |
| 1359 | /* A temporary and the /dev/lguest file descriptor. */ | 1582 | /* A temporary and the /dev/lguest file descriptor. */ |
| 1360 | int i, c, lguest_fd; | 1583 | int i, c, lguest_fd; |
| 1361 | /* The list of Guest devices, based on command line arguments. */ | ||
| 1362 | struct device_list device_list; | ||
| 1363 | /* The boot information for the Guest. */ | 1584 | /* The boot information for the Guest. */ |
| 1364 | void *boot; | 1585 | void *boot; |
| 1365 | /* If they specify an initrd file to load. */ | 1586 | /* If they specify an initrd file to load. */ |
| @@ -1369,11 +1590,12 @@ int main(int argc, char *argv[]) | |||
| 1369 | * device receive input from a file descriptor, we keep an fdset | 1590 | * device receive input from a file descriptor, we keep an fdset |
| 1370 | * (infds) and the maximum fd number (max_infd) with the head of the | 1591 | * (infds) and the maximum fd number (max_infd) with the head of the |
| 1371 | * list. We also keep a pointer to the last device, for easy appending | 1592 | * list. We also keep a pointer to the last device, for easy appending |
| 1372 | * to the list. */ | 1593 | * to the list. Finally, we keep the next interrupt number to hand out |
| 1373 | device_list.max_infd = -1; | 1594 | * (1: remember that 0 is used by the timer). */ |
| 1374 | device_list.dev = NULL; | 1595 | FD_ZERO(&devices.infds); |
| 1375 | device_list.lastdev = &device_list.dev; | 1596 | devices.max_infd = -1; |
| 1376 | FD_ZERO(&device_list.infds); | 1597 | devices.lastdev = &devices.dev; |
| 1598 | devices.next_irq = 1; | ||
| 1377 | 1599 | ||
| 1378 | /* We need to know how much memory so we can set up the device | 1600 | /* We need to know how much memory so we can set up the device |
| 1379 | * descriptor and memory pages for the devices as we parse the command | 1601 | * descriptor and memory pages for the devices as we parse the command |
| @@ -1390,7 +1612,7 @@ int main(int argc, char *argv[]) | |||
| 1390 | + DEVICE_PAGES); | 1612 | + DEVICE_PAGES); |
| 1391 | guest_limit = mem; | 1613 | guest_limit = mem; |
| 1392 | guest_max = mem + DEVICE_PAGES*getpagesize(); | 1614 | guest_max = mem + DEVICE_PAGES*getpagesize(); |
| 1393 | device_list.descs = get_pages(1); | 1615 | devices.descpage = get_pages(1); |
| 1394 | break; | 1616 | break; |
| 1395 | } | 1617 | } |
| 1396 | } | 1618 | } |
| @@ -1401,14 +1623,11 @@ int main(int argc, char *argv[]) | |||
| 1401 | case 'v': | 1623 | case 'v': |
| 1402 | verbose = true; | 1624 | verbose = true; |
| 1403 | break; | 1625 | break; |
| 1404 | case 's': | ||
| 1405 | setup_net_file(optarg, &device_list); | ||
| 1406 | break; | ||
| 1407 | case 't': | 1626 | case 't': |
| 1408 | setup_tun_net(optarg, &device_list); | 1627 | setup_tun_net(optarg); |
| 1409 | break; | 1628 | break; |
| 1410 | case 'b': | 1629 | case 'b': |
| 1411 | setup_block_file(optarg, &device_list); | 1630 | setup_block_file(optarg); |
| 1412 | break; | 1631 | break; |
| 1413 | case 'i': | 1632 | case 'i': |
| 1414 | initrd_name = optarg; | 1633 | initrd_name = optarg; |
| @@ -1426,7 +1645,7 @@ int main(int argc, char *argv[]) | |||
| 1426 | verbose("Guest base is at %p\n", guest_base); | 1645 | verbose("Guest base is at %p\n", guest_base); |
| 1427 | 1646 | ||
| 1428 | /* We always have a console device */ | 1647 | /* We always have a console device */ |
| 1429 | setup_console(&device_list); | 1648 | setup_console(); |
| 1430 | 1649 | ||
| 1431 | /* Now we load the kernel */ | 1650 | /* Now we load the kernel */ |
| 1432 | start = load_kernel(open_or_die(argv[optind+1], O_RDONLY)); | 1651 | start = load_kernel(open_or_die(argv[optind+1], O_RDONLY)); |
| @@ -1468,10 +1687,10 @@ int main(int argc, char *argv[]) | |||
| 1468 | /* We fork off a child process, which wakes the Launcher whenever one | 1687 | /* We fork off a child process, which wakes the Launcher whenever one |
| 1469 | * of the input file descriptors needs attention. Otherwise we would | 1688 | * of the input file descriptors needs attention. Otherwise we would |
| 1470 | * run the Guest until it tries to output something. */ | 1689 | * run the Guest until it tries to output something. */ |
| 1471 | waker_fd = setup_waker(lguest_fd, &device_list); | 1690 | waker_fd = setup_waker(lguest_fd); |
| 1472 | 1691 | ||
| 1473 | /* Finally, run the Guest. This doesn't return. */ | 1692 | /* Finally, run the Guest. This doesn't return. */ |
| 1474 | run_guest(lguest_fd, &device_list); | 1693 | run_guest(lguest_fd); |
| 1475 | } | 1694 | } |
| 1476 | /*:*/ | 1695 | /*:*/ |
| 1477 | 1696 | ||
