author     Rusty Russell <rusty@rustcorp.com.au>   2015-02-13 01:43:44 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>   2015-02-13 01:45:53 -0500
commit     1e1c17a7a2e5c585926eefffbea8a61d7a03a247 (patch)
tree       1b47fdc5f3a65e89e11d179930a3cdbbecc14e01 /tools/lguest
parent     17c56d6de8e809ac57bf4c93d504f5336eb03dd1 (diff)
tools/lguest: use common error macros in the example launcher.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'tools/lguest')
-rw-r--r--   tools/lguest/lguest.c   | 206
1 file changed, 105 insertions(+), 101 deletions(-)
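The point of the patch, in isolation: instead of each call site hand-rolling errx(1, "%s: ...", d->name, ...), the launcher now routes every report of driver misbehaviour through two macros that prepend the offending device (and, for virtqueue checks, the virtqueue) name before exiting. A minimal, self-contained sketch of that pattern follows; the cut-down struct device/struct virtqueue and the demo_check()/main() callers are illustrative stand-ins, not code from lguest.c.

#include <err.h>	/* errx(3): print "progname: message" to stderr and exit */

/* Stand-ins for the launcher's structures; only the name fields matter here. */
struct device {
	const char *name;
};
struct virtqueue {
	struct device *dev;
	const char *name;
	unsigned int avail_flags;	/* simplified from vq->vring.avail->flags */
};

/* Same shape as the macros this patch adds to tools/lguest/lguest.c. */
#define bad_driver(d, fmt, ...) \
	errx(1, "%s: bad driver: " fmt, (d)->name, ## __VA_ARGS__)
#define bad_driver_vq(vq, fmt, ...) \
	errx(1, "%s vq %s: bad driver: " fmt, (vq)->dev->name, \
	     (vq)->name, ## __VA_ARGS__)

/* Hypothetical caller, mirroring the trigger_irq() rule that avail->flags is 0 or 1. */
static void demo_check(struct virtqueue *vq)
{
	if (vq->avail_flags > 1)
		bad_driver_vq(vq, "avail->flags = %u", vq->avail_flags);
}

int main(void)
{
	struct device net = { .name = "net0" };
	struct virtqueue rx = { .dev = &net, .name = "rx", .avail_flags = 3 };

	/* Writes "<progname>: net0 vq rx: bad driver: avail->flags = 3" to stderr, exits 1. */
	demo_check(&rx);
	return 0;
}

Centralising the prefix is why the diffstat above is nearly line-neutral (105 insertions against 101 deletions): each converted call site drops its explicit d->name argument and picks the name up from the macro instead.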
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index 70ee62a0eb9a..eebe94b84e8c 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -252,6 +252,16 @@ static struct termios orig_term;
 #define le32_to_cpu(v32) (v32)
 #define le64_to_cpu(v64) (v64)
 
+/*
+ * A real device would ignore weird/non-compliant driver behaviour. We
+ * stop and flag it, to help debugging Linux problems.
+ */
+#define bad_driver(d, fmt, ...) \
+	errx(1, "%s: bad driver: " fmt, (d)->name, ## __VA_ARGS__)
+#define bad_driver_vq(vq, fmt, ...) \
+	errx(1, "%s vq %s: bad driver: " fmt, (vq)->dev->name, \
+	     vq->name, ## __VA_ARGS__)
+
 /* Is this iovec empty? */
 static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
 {
@@ -264,7 +274,8 @@ static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
 }
 
 /* Take len bytes from the front of this iovec. */
-static void iov_consume(struct iovec iov[], unsigned num_iov,
+static void iov_consume(struct device *d,
+			struct iovec iov[], unsigned num_iov,
 			void *dest, unsigned len)
 {
 	unsigned int i;
@@ -282,7 +293,7 @@ static void iov_consume(struct iovec iov[], unsigned num_iov,
 		len -= used;
 	}
 	if (len != 0)
-		errx(1, "iovec too short!");
+		bad_driver(d, "iovec too short!");
 }
 
 /*L:100
@@ -618,7 +629,8 @@ static void tell_kernel(unsigned long start)
  * we have a convenient routine which checks it and exits with an error message
  * if something funny is going on:
  */
-static void *_check_pointer(unsigned long addr, unsigned int size,
+static void *_check_pointer(struct device *d,
+			    unsigned long addr, unsigned int size,
 			    unsigned int line)
 {
 	/*
@@ -626,7 +638,8 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
 	 * or addr + size wraps around.
 	 */
 	if ((addr + size) > guest_limit || (addr + size) < addr)
-		errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
+		bad_driver(d, "%s:%i: Invalid address %#lx",
+			   __FILE__, line, addr);
 	/*
 	 * We return a pointer for the caller's convenience, now we know it's
 	 * safe to use.
@@ -634,14 +647,14 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
 	return from_guest_phys(addr);
 }
 /* A macro which transparently hands the line number to the real function. */
-#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)
+#define check_pointer(d,addr,size) _check_pointer(d, addr, size, __LINE__)
 
 /*
  * Each buffer in the virtqueues is actually a chain of descriptors. This
  * function returns the next descriptor in the chain, or vq->vring.num if we're
  * at the end.
  */
-static unsigned next_desc(struct vring_desc *desc,
+static unsigned next_desc(struct device *d, struct vring_desc *desc,
 			  unsigned int i, unsigned int max)
 {
 	unsigned int next;
@@ -656,7 +669,7 @@ static unsigned next_desc(struct vring_desc *desc,
 	wmb();
 
 	if (next >= max)
-		errx(1, "Desc next is %u", next);
+		bad_driver(d, "Desc next is %u", next);
 
 	return next;
 }
@@ -681,8 +694,7 @@ static void trigger_irq(struct virtqueue *vq)
 	 * The driver MUST set flags to 0 or 1.
 	 */
 	if (vq->vring.avail->flags > 1)
-		errx(1, "%s: avail->flags = %u\n",
-		     vq->dev->name, vq->vring.avail->flags);
+		bad_driver_vq(vq, "avail->flags = %u\n", vq->vring.avail->flags);
 
 	/*
 	 * 2.4.7.2:
@@ -769,8 +781,8 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 
 	/* Check it isn't doing very strange things with descriptor numbers. */
 	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
-		errx(1, "Guest moved used index from %u to %u",
-		     last_avail, vq->vring.avail->idx);
+		bad_driver_vq(vq, "Guest moved used index from %u to %u",
+			      last_avail, vq->vring.avail->idx);
 
 	/*
 	 * Make sure we read the descriptor number *after* we read the ring
@@ -787,7 +799,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 
 	/* If their number is silly, that's a fatal mistake. */
 	if (head >= vq->vring.num)
-		errx(1, "Guest says index %u is available", head);
+		bad_driver_vq(vq, "Guest says index %u is available", head);
 
 	/* When we start there are none of either input nor output. */
 	*out_num = *in_num = 0;
@@ -817,8 +829,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 		 */
 		if (!(vq->dev->features_accepted &
 		      (1<<VIRTIO_RING_F_INDIRECT_DESC)))
-			errx(1, "%s: vq indirect not negotiated",
-			     vq->dev->name);
+			bad_driver_vq(vq, "vq indirect not negotiated");
 
 		/*
 		 * 2.4.5.3.1:
@@ -828,8 +839,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 		 * table per descriptor).
 		 */
 		if (desc != vq->vring.desc)
-			errx(1, "%s: Indirect within indirect",
-			     vq->dev->name);
+			bad_driver_vq(vq, "Indirect within indirect");
 
 		/*
 		 * Proposed update VIRTIO-134 spells this out:
@@ -838,11 +848,11 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 		 * and VIRTQ_DESC_F_NEXT in flags.
 		 */
 		if (desc[i].flags & VRING_DESC_F_NEXT)
-			errx(1, "%s: indirect and next together",
-			     vq->dev->name);
+			bad_driver_vq(vq, "indirect and next together");
 
 		if (desc[i].len % sizeof(struct vring_desc))
-			errx(1, "Invalid size for indirect buffer table");
+			bad_driver_vq(vq,
+				      "Invalid size for indirect table");
 		/*
 		 * 2.4.5.3.2:
 		 *
@@ -854,7 +864,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 		 */
 
 		max = desc[i].len / sizeof(struct vring_desc);
-		desc = check_pointer(desc[i].addr, desc[i].len);
+		desc = check_pointer(vq->dev, desc[i].addr, desc[i].len);
 		i = 0;
 
 		/* 2.4.5.3.1:
@@ -863,14 +873,14 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 		 * than the Queue Size of the device.
 		 */
 		if (max > vq->pci_config.queue_size)
-			errx(1, "%s: indirect has too many entries",
-			     vq->dev->name);
+			bad_driver_vq(vq,
+				      "indirect has too many entries");
 	}
 
 	/* Grab the first descriptor, and check it's OK. */
 	iov[*out_num + *in_num].iov_len = desc[i].len;
 	iov[*out_num + *in_num].iov_base
-		= check_pointer(desc[i].addr, desc[i].len);
+		= check_pointer(vq->dev, desc[i].addr, desc[i].len);
 	/* If this is an input descriptor, increment that count. */
 	if (desc[i].flags & VRING_DESC_F_WRITE)
 		(*in_num)++;
@@ -880,14 +890,15 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 		 * to come before any input descriptors.
 		 */
 		if (*in_num)
-			errx(1, "Descriptor has out after in");
+			bad_driver_vq(vq,
+				      "Descriptor has out after in");
 		(*out_num)++;
 	}
 
 	/* If we've got too many, that implies a descriptor loop. */
 	if (*out_num + *in_num > max)
-		errx(1, "Looped descriptor");
-	} while ((i = next_desc(desc, i, max)) != max);
+		bad_driver_vq(vq, "Looped descriptor");
+	} while ((i = next_desc(vq->dev, desc, i, max)) != max);
 
 	return head;
 }
@@ -944,7 +955,7 @@ static void console_input(struct virtqueue *vq)
 	/* Make sure there's a descriptor available. */
 	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
 	if (out_num)
-		errx(1, "Output buffers in console in queue?");
+		bad_driver_vq(vq, "Output buffers in console in queue?");
 
 	/* Read into it. This is where we usually wait. */
 	len = readv(STDIN_FILENO, iov, in_num);
@@ -997,7 +1008,7 @@ static void console_output(struct virtqueue *vq)
 	/* We usually wait in here, for the Guest to give us something. */
 	head = wait_for_vq_desc(vq, iov, &out, &in);
 	if (in)
-		errx(1, "Input buffers in console output queue?");
+		bad_driver_vq(vq, "Input buffers in console output queue?");
 
 	/* writev can return a partial write, so we loop here. */
 	while (!iov_empty(iov, out)) {
@@ -1006,7 +1017,7 @@ static void console_output(struct virtqueue *vq)
 			warn("Write to stdout gave %i (%d)", len, errno);
 			break;
 		}
-		iov_consume(iov, out, NULL, len);
+		iov_consume(vq->dev, iov, out, NULL, len);
 	}
 
 	/*
@@ -1035,7 +1046,7 @@ static void net_output(struct virtqueue *vq)
 	/* We usually wait in here for the Guest to give us a packet. */
 	head = wait_for_vq_desc(vq, iov, &out, &in);
 	if (in)
-		errx(1, "Input buffers in net output queue?");
+		bad_driver_vq(vq, "Input buffers in net output queue?");
 	/*
 	 * Send the whole thing through to /dev/net/tun. It expects the exact
 	 * same format: what a coincidence!
@@ -1083,7 +1094,7 @@ static void net_input(struct virtqueue *vq)
 	 */
 	head = wait_for_vq_desc(vq, iov, &out, &in);
 	if (out)
-		errx(1, "Output buffers in net input queue?");
+		bad_driver_vq(vq, "Output buffers in net input queue?");
 
 	/*
 	 * If it looks like we'll block reading from the tun device, send them
@@ -1466,7 +1477,8 @@ static void pci_data_ioread(u16 port, u32 mask, u32 *val)
 		 */
 		/* Must be bar 0 */
 		if (!valid_bar_access(d, &d->config.cfg_access))
-			errx(1, "Invalid cfg_access to bar%u, offset %u len %u",
+			bad_driver(d,
+				   "Invalid cfg_access to bar%u, offset %u len %u",
 			     d->config.cfg_access.cap.bar,
 			     d->config.cfg_access.cap.offset,
 			     d->config.cfg_access.cap.length);
@@ -1785,7 +1797,7 @@ static void check_virtqueue(struct device *d, struct virtqueue *vq)
 	if (vq->pci_config.queue_desc_hi
 	    || vq->pci_config.queue_avail_hi
 	    || vq->pci_config.queue_used_hi)
-		errx(1, "%s: invalid 64-bit queue address", d->name);
+		bad_driver_vq(vq, "invalid 64-bit queue address");
 
 	/*
 	 * 2.4.1:
@@ -1797,17 +1809,20 @@ static void check_virtqueue(struct device *d, struct virtqueue *vq)
 	if (vq->pci_config.queue_desc_lo % 16
 	    || vq->pci_config.queue_avail_lo % 2
 	    || vq->pci_config.queue_used_lo % 4)
-		errx(1, "%s: invalid alignment in queue addresses", d->name);
+		bad_driver_vq(vq, "invalid alignment in queue addresses");
 
 	/* Initialize the virtqueue and check they're all in range. */
 	vq->vring.num = vq->pci_config.queue_size;
-	vq->vring.desc = check_pointer(vq->pci_config.queue_desc_lo,
+	vq->vring.desc = check_pointer(vq->dev,
+				       vq->pci_config.queue_desc_lo,
 				       sizeof(*vq->vring.desc) * vq->vring.num);
-	vq->vring.avail = check_pointer(vq->pci_config.queue_avail_lo,
+	vq->vring.avail = check_pointer(vq->dev,
+					vq->pci_config.queue_avail_lo,
 					sizeof(*vq->vring.avail)
 					+ (sizeof(vq->vring.avail->ring[0])
 					   * vq->vring.num));
-	vq->vring.used = check_pointer(vq->pci_config.queue_used_lo,
+	vq->vring.used = check_pointer(vq->dev,
+				       vq->pci_config.queue_used_lo,
 				       sizeof(*vq->vring.used)
 				       + (sizeof(vq->vring.used->ring[0])
 					  * vq->vring.num));
@@ -1819,8 +1834,8 @@ static void check_virtqueue(struct device *d, struct virtqueue *vq)
 	 * when allocating the used ring.
 	 */
 	if (vq->vring.used->flags != 0)
-		errx(1, "%s: invalid initial used.flags %#x",
-		     d->name, vq->vring.used->flags);
+		bad_driver_vq(vq, "invalid initial used.flags %#x",
+			      vq->vring.used->flags);
 }
 
 static void start_virtqueue(struct virtqueue *vq)
@@ -1877,8 +1892,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		goto feature_write_through32;
 	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
 		if (val > 1)
-			errx(1, "%s: Unexpected driver select %u",
-			     d->name, val);
+			bad_driver(d, "Unexpected driver select %u", val);
 		goto feature_write_through32;
 	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
 		if (d->mmio->cfg.guest_feature_select == 0) {
@@ -1896,8 +1910,8 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		 * not offer
 		 */
 		if (d->features_accepted & ~d->features)
-			errx(1, "%s: over-accepted features %#llx of %#llx",
-			     d->name, d->features_accepted, d->features);
+			bad_driver(d, "over-accepted features %#llx of %#llx",
+				   d->features_accepted, d->features);
 		goto feature_write_through32;
 	case offsetof(struct virtio_pci_mmio, cfg.device_status): {
 		u8 prev;
@@ -1916,8 +1930,8 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 
 		/* 2.1.1: The driver MUST NOT clear a device status bit. */
 		if (d->mmio->cfg.device_status & ~val)
-			errx(1, "%s: unset of device status bit %#x -> %#x",
-			     d->name, d->mmio->cfg.device_status, val);
+			bad_driver(d, "unset of device status bit %#x -> %#x",
+				   d->mmio->cfg.device_status, val);
 
 		/*
 		 * 2.1.2:
@@ -1970,12 +1984,12 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		case VIRTIO_CONFIG_S_ACKNOWLEDGE:
 			break;
 		default:
-			errx(1, "%s: unknown device status bit %#x -> %#x",
-			     d->name, d->mmio->cfg.device_status, val);
+			bad_driver(d, "unknown device status bit %#x -> %#x",
+				   d->mmio->cfg.device_status, val);
 		}
 		if (d->mmio->cfg.device_status != prev)
-			errx(1, "%s: unexpected status transition %#x -> %#x",
-			     d->name, d->mmio->cfg.device_status, val);
+			bad_driver(d, "unexpected status transition %#x -> %#x",
+				   d->mmio->cfg.device_status, val);
 
 		/* If they just wrote FEATURES_OK, we make sure they read */
 		switch (val & ~d->mmio->cfg.device_status) {
@@ -1984,8 +1998,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 			break;
 		case VIRTIO_CONFIG_S_DRIVER_OK:
 			if (d->wrote_features_ok)
-				errx(1, "%s: did not re-read FEATURES_OK",
-				     d->name);
+				bad_driver(d, "did not re-read FEATURES_OK");
 			break;
 		}
 		goto write_through8;
@@ -2017,14 +2030,12 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		 * to queue_size.
 		 */
 		if (val & (val-1))
-			errx(1, "%s: invalid queue size %u\n", d->name, val);
+			bad_driver(d, "invalid queue size %u", val);
 		if (d->mmio->cfg.queue_enable)
-			errx(1, "%s: changing queue size on live device",
-			     d->name);
+			bad_driver(d, "changing queue size on live device");
 		goto write_through16;
 	case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector):
-		errx(1, "%s: attempt to set MSIX vector to %u",
-		     d->name, val);
+		bad_driver(d, "attempt to set MSIX vector to %u", val);
 	case offsetof(struct virtio_pci_mmio, cfg.queue_enable): {
 		struct virtqueue *vq = vq_by_num(d, d->mmio->cfg.queue_select);
 
@@ -2034,7 +2045,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		 * The driver MUST NOT write a 0 to queue_enable.
 		 */
 		if (val != 1)
-			errx(1, "%s: setting queue_enable to %u", d->name, val);
+			bad_driver(d, "setting queue_enable to %u", val);
 
 		/*
 		 * 3.1.1:
@@ -2049,7 +2060,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		 * they should have done that before setting DRIVER_OK.
 		 */
 		if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK)
-			errx(1, "%s: enabling vs after DRIVER_OK", d->name);
+			bad_driver(d, "enabling vq after DRIVER_OK");
 
 		d->mmio->cfg.queue_enable = val;
 		save_vq_config(&d->mmio->cfg, vq);
@@ -2057,7 +2068,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		goto write_through16;
 	}
 	case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off):
-		errx(1, "%s: attempt to write to queue_notify_off", d->name);
+		bad_driver(d, "attempt to write to queue_notify_off");
 	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo):
 	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi):
 	case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo):
@@ -2071,8 +2082,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		 * enabling the virtqueue with queue_enable.
 		 */
 		if (d->mmio->cfg.queue_enable)
-			errx(1, "%s: changing queue on live device",
-			     d->name);
+			bad_driver(d, "changing queue on live device");
 
 		/*
 		 * 3.1.1:
@@ -2083,26 +2093,25 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		 * accept new feature bits after this step.
 		 */
 		if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK))
-			errx(1, "%s: enabling vs before FEATURES_OK", d->name);
+			bad_driver(d, "setting up vq before FEATURES_OK");
 
 		/*
 		 * 6. Re-read device status to ensure the FEATURES_OK bit is
 		 * still set...
 		 */
 		if (d->wrote_features_ok)
-			errx(1, "%s: didn't re-read FEATURES_OK before setup",
-			     d->name);
+			bad_driver(d, "didn't re-read FEATURES_OK before setup");
 
 		goto write_through32;
 	case offsetof(struct virtio_pci_mmio, notify):
 		vq = vq_by_num(d, val);
 		if (!vq)
-			errx(1, "Invalid vq notification on %u", val);
+			bad_driver(d, "Invalid vq notification on %u", val);
 		/* Notify the process handling this vq by adding 1 to eventfd */
 		write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8);
 		goto write_through16;
 	case offsetof(struct virtio_pci_mmio, isr):
-		errx(1, "%s: Unexpected write to isr", d->name);
+		bad_driver(d, "Unexpected write to isr");
 	/* Weird corner case: write to emerg_wr of console */
 	case sizeof(struct virtio_pci_mmio)
 	     + offsetof(struct virtio_console_config, emerg_wr):
@@ -2119,7 +2128,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 		 * The driver MUST NOT write to device_feature, num_queues,
 		 * config_generation or queue_notify_off.
 		 */
-		errx(1, "%s: Unexpected write to offset %u", d->name, off);
+		bad_driver(d, "Unexpected write to offset %u", off);
 	}
 
 feature_write_through32:
@@ -2138,11 +2147,9 @@ feature_write_through32:
 	 * accept new feature bits after this step.
 	 */
 	if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
-		errx(1, "%s: feature write before VIRTIO_CONFIG_S_DRIVER",
-		     d->name);
+		bad_driver(d, "feature write before VIRTIO_CONFIG_S_DRIVER");
 	if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK)
-		errx(1, "%s: feature write after VIRTIO_CONFIG_S_FEATURES_OK",
-		     d->name);
+		bad_driver(d, "feature write after VIRTIO_CONFIG_S_FEATURES_OK");
 
 	/*
 	 * 4.1.3.1:
@@ -2153,8 +2160,8 @@ feature_write_through32:
 	 */
 write_through32:
 	if (mask != 0xFFFFFFFF) {
-		errx(1, "%s: non-32-bit write to offset %u (%#x)",
-		     d->name, off, getreg(eip));
+		bad_driver(d, "non-32-bit write to offset %u (%#x)",
+			   off, getreg(eip));
 		return;
 	}
 	memcpy((char *)d->mmio + off, &val, 4);
@@ -2162,15 +2169,15 @@ write_through32:
 
 write_through16:
 	if (mask != 0xFFFF)
-		errx(1, "%s: non-16-bit (%#x) write to offset %u (%#x)",
-		     d->name, mask, off, getreg(eip));
+		bad_driver(d, "non-16-bit write to offset %u (%#x)",
+			   off, getreg(eip));
 	memcpy((char *)d->mmio + off, &val, 2);
 	return;
 
 write_through8:
 	if (mask != 0xFF)
-		errx(1, "%s: non-8-bit write to offset %u (%#x)",
-		     d->name, off, getreg(eip));
+		bad_driver(d, "non-8-bit write to offset %u (%#x)",
+			   off, getreg(eip));
 	memcpy((char *)d->mmio + off, &val, 1);
 	return;
 }
@@ -2197,11 +2204,11 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
 		 * to the device.
 		 */
 		if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
-			errx(1, "%s: feature read before VIRTIO_CONFIG_S_DRIVER",
-			     d->name);
+			bad_driver(d,
+				   "feature read before VIRTIO_CONFIG_S_DRIVER");
 		goto read_through32;
 	case offsetof(struct virtio_pci_mmio, cfg.msix_config):
-		errx(1, "%s: read of msix_config", d->name);
+		bad_driver(d, "read of msix_config");
 	case offsetof(struct virtio_pci_mmio, cfg.num_queues):
 		goto read_through16;
 	case offsetof(struct virtio_pci_mmio, cfg.device_status):
@@ -2229,13 +2236,12 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
 		 * DRIVER_OK.
 		 */
 		if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK))
-			errx(1, "%s: notify before VIRTIO_CONFIG_S_DRIVER_OK",
-			     d->name);
+			bad_driver(d, "notify before VIRTIO_CONFIG_S_DRIVER_OK");
 		goto read_through16;
 	case offsetof(struct virtio_pci_mmio, isr):
 		if (mask != 0xFF)
-			errx(1, "%s: non-8-bit read from offset %u (%#x)",
-			     d->name, off, getreg(eip));
+			bad_driver(d, "non-8-bit read from offset %u (%#x)",
+				   off, getreg(eip));
 		isr = d->mmio->isr;
 		/*
 		 * 4.1.4.5.1:
@@ -2245,13 +2251,11 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
 		d->mmio->isr = 0;
 		return isr;
 	case offsetof(struct virtio_pci_mmio, padding):
-		errx(1, "%s: read from padding (%#x)",
-		     d->name, getreg(eip));
+		bad_driver(d, "read from padding (%#x)", getreg(eip));
 	default:
 		/* Read from device config space, beware unaligned overflow */
 		if (off > d->mmio_size - 4)
-			errx(1, "%s: read past end (%#x)",
-			     d->name, getreg(eip));
+			bad_driver(d, "read past end (%#x)", getreg(eip));
 
 		/*
 		 * 3.1.1:
@@ -2266,8 +2270,8 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
 		 * that it can support the device before accepting it.
 		 */
 		if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
-			errx(1, "%s: config read before VIRTIO_CONFIG_S_DRIVER",
-			     d->name);
+			bad_driver(d,
+				   "config read before VIRTIO_CONFIG_S_DRIVER");
 
 		if (mask == 0xFFFFFFFF)
 			goto read_through32;
@@ -2286,22 +2290,22 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
 	 */
 read_through32:
 	if (mask != 0xFFFFFFFF)
-		errx(1, "%s: non-32-bit read to offset %u (%#x)",
-		     d->name, off, getreg(eip));
+		bad_driver(d, "non-32-bit read to offset %u (%#x)",
+			   off, getreg(eip));
 	memcpy(&val, (char *)d->mmio + off, 4);
 	return val;
 
 read_through16:
 	if (mask != 0xFFFF)
-		errx(1, "%s: non-16-bit read to offset %u (%#x)",
-		     d->name, off, getreg(eip));
+		bad_driver(d, "non-16-bit read to offset %u (%#x)",
+			   off, getreg(eip));
 	memcpy(&val, (char *)d->mmio + off, 2);
 	return val;
 
 read_through8:
 	if (mask != 0xFF)
-		errx(1, "%s: non-8-bit read to offset %u (%#x)",
-		     d->name, off, getreg(eip));
+		bad_driver(d, "non-8-bit read to offset %u (%#x)",
+			   off, getreg(eip));
 	memcpy(&val, (char *)d->mmio + off, 1);
 	return val;
 }
@@ -2943,7 +2947,7 @@ static void blk_request(struct virtqueue *vq)
 	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
 
 	/* Copy the output header from the front of the iov (adjusts iov) */
-	iov_consume(iov, out_num, &out, sizeof(out));
+	iov_consume(vq->dev, iov, out_num, &out, sizeof(out));
 
 	/* Find and trim end of iov input array, for our status byte. */
 	in = NULL;
@@ -2955,7 +2959,7 @@ static void blk_request(struct virtqueue *vq)
 		}
 	}
 	if (!in)
-		errx(1, "Bad virtblk cmd with no room for status");
+		bad_driver_vq(vq, "Bad virtblk cmd with no room for status");
 
 	/*
 	 * For historical reasons, block operations are expressed in 512 byte
@@ -2985,7 +2989,7 @@ static void blk_request(struct virtqueue *vq)
 		/* Trim it back to the correct length */
 		ftruncate64(vblk->fd, vblk->len);
 		/* Die, bad Guest, die. */
-		errx(1, "Write past end %llu+%u", off, ret);
+		bad_driver_vq(vq, "Write past end %llu+%u", off, ret);
 	}
 
 	wlen = sizeof(*in);
@@ -3078,7 +3082,7 @@ static void rng_input(struct virtqueue *vq)
 	/* First we need a buffer from the Guests's virtqueue. */
 	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
 	if (out_num)
-		errx(1, "Output buffers in rng?");
+		bad_driver_vq(vq, "Output buffers in rng?");
 
 	/*
 	 * Just like the console write, we loop to cover the whole iovec.
@@ -3088,7 +3092,7 @@ static void rng_input(struct virtqueue *vq)
 		len = readv(rng_info->rfd, iov, in_num);
 		if (len <= 0)
 			err(1, "Read from /dev/urandom gave %i", len);
-		iov_consume(iov, in_num, NULL, len);
+		iov_consume(vq->dev, iov, in_num, NULL, len);
 		totlen += len;
 
 