author    | Rusty Russell <rusty@rustcorp.com.au> | 2015-02-10 23:51:01 -0500
committer | Rusty Russell <rusty@rustcorp.com.au> | 2015-02-11 01:17:42 -0500
commit    | d9028eda7b381e57246a53bf9bffc04a4a2920b5 (patch)
tree      | f0d3c89368e104d09402b30512eab1009209620d /tools/lguest
parent    | e68ccd1f9d3d0fe8085b4e18c2cc2245f384c420 (diff)
lguest: remove support for lguest bus in demonstration launcher.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'tools/lguest')
-rw-r--r-- | tools/lguest/lguest.c | 355
1 file changed, 22 insertions, 333 deletions
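In short, the launcher's old lguest bus kept a per-device descriptor page, a linked device list and byte-array feature bitmasks; all of that goes away here, leaving only the virtio-PCI path, where each device carries a 64-bit feature bitmask and interrupts come from the PCI config's irq_line. A rough sketch of the two feature-advertising styles, using only identifiers that appear in the diff below (the removed add_feature() is simplified by dropping its length bookkeeping):

/* Removed lguest-bus style: feature bits live in byte arrays placed after
 * the virtqueue descriptors in the device's descriptor page. */
static void add_feature(struct device *dev, unsigned bit)
{
	u8 *features = get_feature_bits(dev);	/* helper also removed below */

	features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT));
}

/* Surviving virtio-PCI style: a plain 64-bit bitmask on the device, which
 * the Guest reads through the PCI common config MMIO region. */
static void add_pci_feature(struct device *dev, unsigned bit)
{
	dev->features |= (1ULL << bit);
}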
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index 663166aff1f5..b5ac73525f6d 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -117,14 +117,6 @@ struct device_list {
 	/* Counter to print out convenient device numbers. */
 	unsigned int device_num;
 
-	/* The descriptor page for the devices. */
-	u8 *descpage;
-
-	/* A single linked list of devices. */
-	struct device *dev;
-	/* And a pointer to the last device for easy append. */
-	struct device *lastdev;
-
 	/* PCI devices. */
 	struct device *pci[MAX_PCI_DEVICES];
 };
@@ -170,16 +162,6 @@ struct pci_config {
 
 /* The device structure describes a single device. */
 struct device {
-	/* The linked-list pointer. */
-	struct device *next;
-
-	/* The device's descriptor, as mapped into the Guest. */
-	struct lguest_device_desc *desc;
-
-	/* We can't trust desc values once Guest has booted: we use these. */
-	unsigned int feature_len;
-	unsigned int num_vq;
-
 	/* The name of this device, for --verbose. */
 	const char *name;
 
@@ -216,9 +198,6 @@ struct virtqueue {
 	/* Which device owns me. */
 	struct device *dev;
 
-	/* The configuration for this queue. */
-	struct lguest_vqconfig config;
-
 	/* The actual ring of buffers. */
 	struct vring vring;
 
@@ -301,13 +280,6 @@ static void iov_consume(struct iovec iov[], unsigned num_iov,
 		errx(1, "iovec too short!");
 }
 
-/* The device virtqueue descriptors are followed by feature bitmasks. */
-static u8 *get_feature_bits(struct device *dev)
-{
-	return (u8 *)(dev->desc + 1)
-		+ dev->num_vq * sizeof(struct lguest_vqconfig);
-}
-
 /*L:100
  * The Launcher code itself takes us out into userspace, that scary place where
  * pointers run wild and free! Unfortunately, like most userspace programs,
@@ -378,17 +350,6 @@ static void *map_zeroed_pages(unsigned int num)
 	return addr + getpagesize();
 }
 
-/* Get some more pages for a device. */
-static void *get_pages(unsigned int num)
-{
-	void *addr = from_guest_phys(guest_limit);
-
-	guest_limit += num * getpagesize();
-	if (guest_limit > guest_max)
-		errx(1, "Not enough memory for devices");
-	return addr;
-}
-
 /* Get some bytes which won't be mapped into the guest. */
 static unsigned long get_mmio_region(size_t size)
 {
@@ -701,7 +662,7 @@ static unsigned next_desc(struct vring_desc *desc,
  */
 static void trigger_irq(struct virtqueue *vq)
 {
-	unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };
+	unsigned long buf[] = { LHREQ_IRQ, vq->dev->config.irq_line };
 
 	/* Don't inform them if nothing used. */
 	if (!vq->pending_used)
@@ -713,13 +674,12 @@ static void trigger_irq(struct virtqueue *vq)
 		return;
 	}
 
-	/* For a PCI device, set isr to 1 (queue interrupt pending) */
-	if (vq->dev->mmio)
-		vq->dev->mmio->isr = 0x1;
+	/* Set isr to 1 (queue interrupt pending) */
+	vq->dev->mmio->isr = 0x1;
 
 	/* Send the Guest an interrupt tell them we used something up. */
 	if (write(lguest_fd, buf, sizeof(buf)) != 0)
-		err(1, "Triggering irq %i", vq->config.irq);
+		err(1, "Triggering irq %i", vq->dev->config.irq_line);
 }
 
 /*
@@ -1085,21 +1045,18 @@ static void reset_device(struct device *dev)
 	verbose("Resetting device %s\n", dev->name);
 
 	/* Clear any features they've acked. */
-	memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len);
+	dev->features_accepted = 0;
 
 	/* We're going to be explicitly killing threads, so ignore them. */
 	signal(SIGCHLD, SIG_IGN);
 
-	/* Zero out the virtqueues, get rid of their threads */
+	/* Get rid of the virtqueue threads */
 	for (vq = dev->vq; vq; vq = vq->next) {
 		if (vq->thread != (pid_t)-1) {
 			kill(vq->thread, SIGTERM);
 			waitpid(vq->thread, NULL, 0);
 			vq->thread = (pid_t)-1;
 		}
-		memset(vq->vring.desc, 0,
-		       vring_size(vq->config.num, LGUEST_VRING_ALIGN));
-		lg_last_avail(vq) = 0;
 	}
 	dev->running = false;
 
@@ -1107,122 +1064,27 @@ static void reset_device(struct device *dev)
 	signal(SIGCHLD, (void *)kill_launcher);
 }
 
-/*L:216
- * This actually creates the thread which services the virtqueue for a device.
- */
-static void create_thread(struct virtqueue *vq)
-{
-	/*
-	 * Create stack for thread.  Since the stack grows upwards, we point
-	 * the stack pointer to the end of this region.
-	 */
-	char *stack = malloc(32768);
-	unsigned long args[] = { LHREQ_EVENTFD,
-				 vq->config.pfn*getpagesize(), 0 };
-
-	/* Create a zero-initialized eventfd. */
-	vq->eventfd = eventfd(0, 0);
-	if (vq->eventfd < 0)
-		err(1, "Creating eventfd");
-	args[2] = vq->eventfd;
-
-	/*
-	 * Attach an eventfd to this virtqueue: it will go off when the Guest
-	 * does an LHCALL_NOTIFY for this vq.
-	 */
-	if (write(lguest_fd, &args, sizeof(args)) != 0)
-		err(1, "Attaching eventfd");
-
-	/*
-	 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
-	 * we get a signal if it dies.
-	 */
-	vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
-	if (vq->thread == (pid_t)-1)
-		err(1, "Creating clone");
-
-	/* We close our local copy now the child has it. */
-	close(vq->eventfd);
-}
-
-static void start_device(struct device *dev)
+static void cleanup_devices(void)
 {
 	unsigned int i;
-	struct virtqueue *vq;
-
-	verbose("Device %s OK: offered", dev->name);
-	for (i = 0; i < dev->feature_len; i++)
-		verbose(" %02x", get_feature_bits(dev)[i]);
-	verbose(", accepted");
-	for (i = 0; i < dev->feature_len; i++)
-		verbose(" %02x", get_feature_bits(dev)
-			[dev->feature_len+i]);
 
-	for (vq = dev->vq; vq; vq = vq->next) {
-		if (vq->service)
-			create_thread(vq);
+	for (i = 1; i < MAX_PCI_DEVICES; i++) {
+		struct device *d = devices.pci[i];
+		if (!d)
+			continue;
+		reset_device(d);
 	}
-	dev->running = true;
-}
-
-static void cleanup_devices(void)
-{
-	struct device *dev;
-
-	for (dev = devices.dev; dev; dev = dev->next)
-		reset_device(dev);
 
 	/* If we saved off the original terminal settings, restore them now. */
 	if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
 		tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
 }
 
-/* When the Guest tells us they updated the status field, we handle it. */
-static void update_device_status(struct device *dev)
-{
-	/* A zero status is a reset, otherwise it's a set of flags. */
-	if (dev->desc->status == 0)
-		reset_device(dev);
-	else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) {
-		warnx("Device %s configuration FAILED", dev->name);
-		if (dev->running)
-			reset_device(dev);
-	} else {
-		if (dev->running)
-			err(1, "Device %s features finalized twice", dev->name);
-		start_device(dev);
-	}
-}
-
 /*L:215
- * This is the generic routine we call when the Guest uses LHCALL_NOTIFY. In
- * particular, it's used to notify us of device status changes during boot.
+ * This is the generic routine we call when the Guest uses LHCALL_NOTIFY.
  */
 static void handle_output(unsigned long addr)
 {
-	struct device *i;
-
-	/* Check each device. */
-	for (i = devices.dev; i; i = i->next) {
-		struct virtqueue *vq;
-
-		/*
-		 * Notifications to device descriptors mean they updated the
-		 * device status.
-		 */
-		if (from_guest_phys(addr) == i->desc) {
-			update_device_status(i);
-			return;
-		}
-
-		/* Devices should not be used before features are finalized. */
-		for (vq = i->vq; vq; vq = vq->next) {
-			if (addr != vq->config.pfn*getpagesize())
-				continue;
-			errx(1, "Notification on %s before setup!", i->name);
-		}
-	}
-
 	/*
 	 * Early console write is done using notify on a nul-terminated string
 	 * in Guest memory.  It's also great for hacking debugging messages
@@ -1736,11 +1598,6 @@ static void enable_virtqueue(struct device *d, struct virtqueue *vq)
 		err(1, "Creating clone");
 }
 
-static void reset_pci_device(struct device *dev)
-{
-	/* FIXME */
-}
-
 static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 {
 	struct virtqueue *vq;
@@ -1775,7 +1632,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 	case offsetof(struct virtio_pci_mmio, cfg.device_status):
 		verbose("%s: device status -> %#x\n", d->name, val);
 		if (val == 0)
-			reset_pci_device(d);
+			reset_device(d);
 		goto write_through8;
 	case offsetof(struct virtio_pci_mmio, cfg.queue_select):
 		vq = vq_by_num(d, val);
@@ -1986,102 +1843,6 @@ static void emulate_mmio(unsigned long paddr, const u8 *insn)
  * device" so the Launcher can keep track of it.  We have common helper
  * routines to allocate and manage them.
  */
-
-/*
- * The layout of the device page is a "struct lguest_device_desc" followed by a
- * number of virtqueue descriptors, then two sets of feature bits, then an
- * array of configuration bytes.  This routine returns the configuration
- * pointer.
- */
-static u8 *device_config(const struct device *dev)
-{
-	return (void *)(dev->desc + 1)
-		+ dev->num_vq * sizeof(struct lguest_vqconfig)
-		+ dev->feature_len * 2;
-}
-
-/*
- * This routine allocates a new "struct lguest_device_desc" from descriptor
- * table page just above the Guest's normal memory.  It returns a pointer to
- * that descriptor.
- */
-static struct lguest_device_desc *new_dev_desc(u16 type)
-{
-	struct lguest_device_desc d = { .type = type };
-	void *p;
-
-	/* Figure out where the next device config is, based on the last one. */
-	if (devices.lastdev)
-		p = device_config(devices.lastdev)
-			+ devices.lastdev->desc->config_len;
-	else
-		p = devices.descpage;
-
-	/* We only have one page for all the descriptors. */
-	if (p + sizeof(d) > (void *)devices.descpage + getpagesize())
-		errx(1, "Too many devices");
-
-	/* p might not be aligned, so we memcpy in. */
-	return memcpy(p, &d, sizeof(d));
-}
-
-/*
- * Each device descriptor is followed by the description of its virtqueues.  We
- * specify how many descriptors the virtqueue is to have.
- */
-static void add_virtqueue(struct device *dev, unsigned int num_descs,
-			  void (*service)(struct virtqueue *))
-{
-	unsigned int pages;
-	struct virtqueue **i, *vq = malloc(sizeof(*vq));
-	void *p;
-
-	/* First we need some memory for this virtqueue. */
-	pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
-		/ getpagesize();
-	p = get_pages(pages);
-
-	/* Initialize the virtqueue */
-	vq->next = NULL;
-	vq->last_avail_idx = 0;
-	vq->dev = dev;
-
-	/*
-	 * This is the routine the service thread will run, and its Process ID
-	 * once it's running.
-	 */
-	vq->service = service;
-	vq->thread = (pid_t)-1;
-
-	/* Initialize the configuration. */
-	vq->config.num = num_descs;
-	vq->config.irq = devices.next_irq++;
-	vq->config.pfn = to_guest_phys(p) / getpagesize();
-
-	/* Initialize the vring. */
-	vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);
-
-	/*
-	 * Append virtqueue to this device's descriptor.  We use
-	 * device_config() to get the end of the device's current virtqueues;
-	 * we check that we haven't added any config or feature information
-	 * yet, otherwise we'd be overwriting them.
-	 */
-	assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
-	memcpy(device_config(dev), &vq->config, sizeof(vq->config));
-	dev->num_vq++;
-	dev->desc->num_vq++;
-
-	verbose("Virtqueue page %#lx\n", to_guest_phys(p));
-
-	/*
-	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
-	 * second.
-	 */
-	for (i = &dev->vq; *i; i = &(*i)->next);
-	*i = vq;
-}
-
 static void add_pci_virtqueue(struct device *dev,
 			      void (*service)(struct virtqueue *))
 {
@@ -2107,9 +1868,6 @@ static void add_pci_virtqueue(struct device *dev,
 	/* Add one to the number of queues */
 	vq->dev->mmio->cfg.num_queues++;
 
-	/* FIXME: Do irq per virtqueue, not per device. */
-	vq->config.irq = vq->dev->config.irq_line;
-
 	/*
 	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
 	 * second.
@@ -2118,47 +1876,12 @@ static void add_pci_virtqueue(struct device *dev,
 	*i = vq;
 }
 
-/*
- * The first half of the feature bitmask is for us to advertise features.  The
- * second half is for the Guest to accept features.
- */
-static void add_feature(struct device *dev, unsigned bit)
-{
-	u8 *features = get_feature_bits(dev);
-
-	/* We can't extend the feature bits once we've added config bytes */
-	if (dev->desc->feature_len <= bit / CHAR_BIT) {
-		assert(dev->desc->config_len == 0);
-		dev->feature_len = dev->desc->feature_len = (bit/CHAR_BIT) + 1;
-	}
-
-	features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT));
-}
-
+/* The Guest accesses the feature bits via the PCI common config MMIO region */
 static void add_pci_feature(struct device *dev, unsigned bit)
 {
 	dev->features |= (1ULL << bit);
 }
 
-/*
- * This routine sets the configuration fields for an existing device's
- * descriptor.  It only works for the last device, but that's OK because that's
- * how we use it.
- */
-static void set_config(struct device *dev, unsigned len, const void *conf)
-{
-	/* Check we haven't overflowed our single page. */
-	if (device_config(dev) + len > devices.descpage + getpagesize())
-		errx(1, "Too many devices");
-
-	/* Copy in the config information, and store the length. */
-	memcpy(device_config(dev), conf, len);
-	dev->desc->config_len = len;
-
-	/* Size must fit in config_len field (8 bits)! */
-	assert(dev->desc->config_len == len);
-}
-
 /* For devices with no config. */
 static void no_device_config(struct device *dev)
 {
@@ -2287,59 +2010,28 @@ static void init_pci_config(struct pci_config *pci, u16 type,
 }
 
 /*
- * This routine does all the creation and setup of a new device, including
- * calling new_dev_desc() to allocate the descriptor and device memory.  We
- * don't actually start the service threads until later.
+ * This routine does all the creation and setup of a new device, but we don't
+ * actually place the MMIO region until we know the size (if any) of the
+ * device-specific config.  And we don't actually start the service threads
+ * until later.
  *
  * See what I mean about userspace being boring?
  */
-static struct device *new_device(const char *name, u16 type)
-{
-	struct device *dev = malloc(sizeof(*dev));
-
-	/* Now we populate the fields one at a time. */
-	dev->desc = new_dev_desc(type);
-	dev->name = name;
-	dev->vq = NULL;
-	dev->feature_len = 0;
-	dev->num_vq = 0;
-	dev->running = false;
-	dev->next = NULL;
-
-	/*
-	 * Append to device list.  Prepending to a single-linked list is
-	 * easier, but the user expects the devices to be arranged on the bus
-	 * in command-line order.  The first network device on the command line
-	 * is eth0, the first block device /dev/vda, etc.
-	 */
-	if (devices.lastdev)
-		devices.lastdev->next = dev;
-	else
-		devices.dev = dev;
-	devices.lastdev = dev;
-
-	return dev;
-}
-
 static struct device *new_pci_device(const char *name, u16 type,
 				     u8 class, u8 subclass)
 {
 	struct device *dev = malloc(sizeof(*dev));
 
 	/* Now we populate the fields one at a time. */
-	dev->desc = NULL;
 	dev->name = name;
 	dev->vq = NULL;
-	dev->feature_len = 0;
-	dev->num_vq = 0;
 	dev->running = false;
-	dev->next = NULL;
 	dev->mmio_size = sizeof(struct virtio_pci_mmio);
 	dev->mmio = calloc(1, dev->mmio_size);
 	dev->features = (u64)1 << VIRTIO_F_VERSION_1;
 	dev->features_accepted = 0;
 
-	if (devices.device_num + 1 >= 32)
+	if (devices.device_num + 1 >= MAX_PCI_DEVICES)
 		errx(1, "Can only handle 31 PCI devices");
 
 	init_pci_config(&dev->config, type, class, subclass);
@@ -2940,11 +2632,9 @@ int main(int argc, char *argv[])
 	main_args = argv;
 
 	/*
-	 * First we initialize the device list.  We keep a pointer to the last
-	 * device, and the next interrupt number to use for devices (1:
-	 * remember that 0 is used by the timer).
+	 * First we initialize the device list.  We remember next interrupt
+	 * number to use for devices (1: remember that 0 is used by the timer).
 	 */
-	devices.lastdev = NULL;
 	devices.next_irq = 1;
 
 	/* We're CPU 0.  In fact, that's the only CPU possible right now. */
@@ -2969,7 +2659,6 @@ int main(int argc, char *argv[])
 						      + DEVICE_PAGES);
 			guest_limit = mem;
 			guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize();
-			devices.descpage = get_pages(1);
 			break;
 		}
 	}