author    Rusty Russell <rusty@rustcorp.com.au>  2015-02-10 23:45:11 -0500
committer Rusty Russell <rusty@rustcorp.com.au>  2015-02-11 01:17:36 -0500
commit    93153077107ecfbf35a3412f6220521e8d8c14ba (patch)
tree      4f682a617a3be2bebdf9191bd8ba14c1e12c0f44
parent    d7fbf6e95e2c5e7ef97c463a97499d7a2341fb09 (diff)
lguest: implement virtio-PCI MMIO accesses.
For each device, we need to include the vendor capabilities that demarcate where the virtio common, notification and ISR regions are (we put them all in BAR0). We also need to handle the switching of virtqueues using the accessors.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--  tools/lguest/lguest.c | 492
1 file changed, 490 insertions(+), 2 deletions(-)
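The capability chain this patch builds (common, notify, ISR, then cfg_access, all pointing into BAR0) is what a virtio 1.0 driver walks to locate each region. As a rough illustration of that lookup, the stand-alone C sketch below scans a PCI configuration-space buffer for vendor-specific capabilities and prints the BAR/offset/length of each region. It is not code from this commit: struct vp_cap merely mirrors the layout of struct virtio_pci_cap, and walk_virtio_caps(), main() and the 256-byte config[] buffer are illustrative stand-ins.

/*
 * Sketch only: walk a PCI capability list and report virtio vendor caps.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PCI_CAP_ID_VNDR		0x09	/* vendor-specific capability ID */
#define PCI_CAPABILITY_LIST	0x34	/* offset of first capability pointer */

struct vp_cap {			/* mirrors struct virtio_pci_cap */
	uint8_t  cap_vndr;	/* PCI_CAP_ID_VNDR */
	uint8_t  cap_next;	/* offset of next capability, 0 ends the list */
	uint8_t  cap_len;
	uint8_t  cfg_type;	/* common/notify/ISR/device cfg */
	uint8_t  bar;		/* which BAR holds the region (BAR0 here) */
	uint8_t  padding[3];
	uint32_t offset;	/* region offset within that BAR */
	uint32_t length;	/* region length in bytes */
};

static void walk_virtio_caps(const uint8_t config[256])
{
	uint8_t where = config[PCI_CAPABILITY_LIST];

	while (where && where + sizeof(struct vp_cap) <= 256) {
		struct vp_cap cap;

		memcpy(&cap, config + where, sizeof(cap));
		if (cap.cap_vndr == PCI_CAP_ID_VNDR)
			printf("cfg_type %u: BAR%u, offset %u, length %u\n",
			       cap.cfg_type, cap.bar, cap.offset, cap.length);
		where = cap.cap_next;
	}
}

int main(void)
{
	uint8_t config[256] = { 0 };	/* empty config space: prints nothing */

	walk_virtio_caps(config);
	return 0;
}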
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index 0f29657fc065..eafdaf2a14c4 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -63,12 +63,16 @@ typedef uint16_t u16;
 typedef uint8_t u8;
 /*:*/
 
-#include <linux/virtio_config.h>
+#define VIRTIO_PCI_NO_LEGACY
+
+/* Use in-kernel ones, which defines VIRTIO_F_VERSION_1 */
+#include "../../include/uapi/linux/virtio_config.h"
 #include <linux/virtio_net.h>
 #include <linux/virtio_blk.h>
 #include <linux/virtio_console.h>
 #include <linux/virtio_rng.h>
 #include <linux/virtio_ring.h>
+#include "../../include/uapi/linux/virtio_pci.h"
 #include <asm/bootparam.h>
 #include "../../include/linux/lguest_launcher.h"
 
@@ -126,6 +130,19 @@ struct device_list {
 /* The list of Guest devices, based on command line arguments. */
 static struct device_list devices;
 
+struct virtio_pci_cfg_cap {
+	struct virtio_pci_cap cap;
+	u32 window; /* Data for BAR access. */
+};
+
+struct virtio_pci_mmio {
+	struct virtio_pci_common_cfg cfg;
+	u16 notify;
+	u8 isr;
+	u8 padding;
+	/* Device-specific configuration follows this. */
+};
+
 /* This is the layout (little-endian) of the PCI config space. */
 struct pci_config {
 	u16 vendor_id, device_id;
@@ -139,6 +156,14 @@ struct pci_config {
 	u8 capabilities, reserved1[3];
 	u32 reserved2;
 	u8 irq_line, irq_pin, min_grant, max_latency;
+
+	/* Now, this is the linked capability list. */
+	struct virtio_pci_cap common;
+	struct virtio_pci_notify_cap notify;
+	struct virtio_pci_cap isr;
+	struct virtio_pci_cap device;
+	/* FIXME: Implement this! */
+	struct virtio_pci_cfg_cap cfg_access;
 };
 
 /* The device structure describes a single device. */
@@ -168,6 +193,9 @@ struct device {
 		u32 config_words[sizeof(struct pci_config) / sizeof(u32)];
 	};
 
+	/* Features we offer, and those accepted. */
+	u64 features, features_accepted;
+
 	/* Device-specific config hangs off the end of this. */
 	struct virtio_pci_mmio *mmio;
 
@@ -192,6 +220,9 @@ struct virtqueue {
 	/* The actual ring of buffers. */
 	struct vring vring;
 
+	/* The information about this virtqueue (we only use queue_size on) */
+	struct virtio_pci_common_cfg pci_config;
+
 	/* Last available index we saw. */
 	u16 last_avail_idx;
 
@@ -680,6 +711,10 @@ static void trigger_irq(struct virtqueue *vq)
 		return;
 	}
 
+	/* For a PCI device, set isr to 1 (queue interrupt pending) */
+	if (vq->dev->mmio)
+		vq->dev->mmio->isr = 0x1;
+
 	/* Send the Guest an interrupt tell them we used something up. */
 	if (write(lguest_fd, buf, sizeof(buf)) != 0)
 		err(1, "Triggering irq %i", vq->config.irq);
@@ -1616,13 +1651,264 @@ static struct device *find_mmio_region(unsigned long paddr, u32 *off)
 	return NULL;
 }
 
+/* FIXME: Use vq array. */
+static struct virtqueue *vq_by_num(struct device *d, u32 num)
+{
+	struct virtqueue *vq = d->vq;
+
+	while (num-- && vq)
+		vq = vq->next;
+
+	return vq;
+}
+
+static void save_vq_config(const struct virtio_pci_common_cfg *cfg,
+			   struct virtqueue *vq)
+{
+	vq->pci_config = *cfg;
+}
+
+static void restore_vq_config(struct virtio_pci_common_cfg *cfg,
+			      struct virtqueue *vq)
+{
+	/* Only restore the per-vq part */
+	size_t off = offsetof(struct virtio_pci_common_cfg, queue_size);
+
+	memcpy((void *)cfg + off, (void *)&vq->pci_config + off,
+	       sizeof(*cfg) - off);
+}
+
+/*
+ * When they enable the virtqueue, we check that their setup is valid.
+ */
+static void enable_virtqueue(struct device *d, struct virtqueue *vq)
+{
+	/*
+	 * Create stack for thread. Since the stack grows upwards, we point
+	 * the stack pointer to the end of this region.
+	 */
+	char *stack = malloc(32768);
+
+	/* Because lguest is 32 bit, all the descriptor high bits must be 0 */
+	if (vq->pci_config.queue_desc_hi
+	    || vq->pci_config.queue_avail_hi
+	    || vq->pci_config.queue_used_hi)
+		errx(1, "%s: invalid 64-bit queue address", d->name);
+
+	/* Initialize the virtqueue and check they're all in range. */
+	vq->vring.num = vq->pci_config.queue_size;
+	vq->vring.desc = check_pointer(vq->pci_config.queue_desc_lo,
+				       sizeof(*vq->vring.desc) * vq->vring.num);
+	vq->vring.avail = check_pointer(vq->pci_config.queue_avail_lo,
+					sizeof(*vq->vring.avail)
+					+ (sizeof(vq->vring.avail->ring[0])
+					   * vq->vring.num));
+	vq->vring.used = check_pointer(vq->pci_config.queue_used_lo,
+				       sizeof(*vq->vring.used)
+				       + (sizeof(vq->vring.used->ring[0])
+					  * vq->vring.num));
+
+
+	/* Create a zero-initialized eventfd. */
+	vq->eventfd = eventfd(0, 0);
+	if (vq->eventfd < 0)
+		err(1, "Creating eventfd");
+
+	/*
+	 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
+	 * we get a signal if it dies.
+	 */
+	vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
+	if (vq->thread == (pid_t)-1)
+		err(1, "Creating clone");
+}
+
+static void reset_pci_device(struct device *dev)
+{
+	/* FIXME */
+}
+
 static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 {
+	struct virtqueue *vq;
+
+	switch (off) {
+	case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
+		if (val == 0)
+			d->mmio->cfg.device_feature = d->features;
+		else if (val == 1)
+			d->mmio->cfg.device_feature = (d->features >> 32);
+		else
+			d->mmio->cfg.device_feature = 0;
+		goto write_through32;
+	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
+		if (val > 1)
+			errx(1, "%s: Unexpected driver select %u",
+			     d->name, val);
+		goto write_through32;
+	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
+		if (d->mmio->cfg.guest_feature_select == 0) {
+			d->features_accepted &= ~((u64)0xFFFFFFFF);
+			d->features_accepted |= val;
+		} else {
+			assert(d->mmio->cfg.guest_feature_select == 1);
+			d->features_accepted &= ((u64)0xFFFFFFFF << 32);
+			d->features_accepted |= ((u64)val) << 32;
+		}
+		if (d->features_accepted & ~d->features)
+			errx(1, "%s: over-accepted features %#llx of %#llx",
+			     d->name, d->features_accepted, d->features);
+		goto write_through32;
+	case offsetof(struct virtio_pci_mmio, cfg.device_status):
+		verbose("%s: device status -> %#x\n", d->name, val);
+		if (val == 0)
+			reset_pci_device(d);
+		goto write_through8;
+	case offsetof(struct virtio_pci_mmio, cfg.queue_select):
+		vq = vq_by_num(d, val);
+		/* Out of range? Return size 0 */
+		if (!vq) {
+			d->mmio->cfg.queue_size = 0;
+			goto write_through16;
+		}
+		/* Save registers for old vq, if it was a valid vq */
+		if (d->mmio->cfg.queue_size)
+			save_vq_config(&d->mmio->cfg,
+				       vq_by_num(d, d->mmio->cfg.queue_select));
+		/* Restore the registers for the queue they asked for */
+		restore_vq_config(&d->mmio->cfg, vq);
+		goto write_through16;
+	case offsetof(struct virtio_pci_mmio, cfg.queue_size):
+		if (val & (val-1))
+			errx(1, "%s: invalid queue size %u\n", d->name, val);
+		if (d->mmio->cfg.queue_enable)
+			errx(1, "%s: changing queue size on live device",
+			     d->name);
+		goto write_through16;
+	case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector):
+		errx(1, "%s: attempt to set MSIX vector to %u",
+		     d->name, val);
+	case offsetof(struct virtio_pci_mmio, cfg.queue_enable):
+		if (val != 1)
+			errx(1, "%s: setting queue_enable to %u", d->name, val);
+		d->mmio->cfg.queue_enable = val;
+		save_vq_config(&d->mmio->cfg,
+			       vq_by_num(d, d->mmio->cfg.queue_select));
+		enable_virtqueue(d, vq_by_num(d, d->mmio->cfg.queue_select));
+		goto write_through16;
+	case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off):
+		errx(1, "%s: attempt to write to queue_notify_off", d->name);
+	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo):
+	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi):
+	case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo):
+	case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi):
+	case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo):
+	case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi):
+		if (d->mmio->cfg.queue_enable)
+			errx(1, "%s: changing queue on live device",
+			     d->name);
+		goto write_through32;
+	case offsetof(struct virtio_pci_mmio, notify):
+		vq = vq_by_num(d, val);
+		if (!vq)
+			errx(1, "Invalid vq notification on %u", val);
+		/* Notify the process handling this vq by adding 1 to eventfd */
+		write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8);
+		goto write_through16;
+	case offsetof(struct virtio_pci_mmio, isr):
+		errx(1, "%s: Unexpected write to isr", d->name);
+	default:
+		errx(1, "%s: Unexpected write to offset %u", d->name, off);
+	}
+
+write_through32:
+	if (mask != 0xFFFFFFFF) {
+		errx(1, "%s: non-32-bit write to offset %u (%#x)",
+		     d->name, off, getreg(eip));
+		return;
+	}
+	memcpy((char *)d->mmio + off, &val, 4);
+	return;
+
+write_through16:
+	if (mask != 0xFFFF)
+		errx(1, "%s: non-16-bit (%#x) write to offset %u (%#x)",
+		     d->name, mask, off, getreg(eip));
+	memcpy((char *)d->mmio + off, &val, 2);
+	return;
+
+write_through8:
+	if (mask != 0xFF)
+		errx(1, "%s: non-8-bit write to offset %u (%#x)",
+		     d->name, off, getreg(eip));
+	memcpy((char *)d->mmio + off, &val, 1);
+	return;
 }
 
 static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
 {
-	return 0xFFFFFFFF;
+	u8 isr;
+	u32 val = 0;
+
+	switch (off) {
+	case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
+	case offsetof(struct virtio_pci_mmio, cfg.device_feature):
+	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
+	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
+		goto read_through32;
+	case offsetof(struct virtio_pci_mmio, cfg.msix_config):
+		errx(1, "%s: read of msix_config", d->name);
+	case offsetof(struct virtio_pci_mmio, cfg.num_queues):
+		goto read_through16;
+	case offsetof(struct virtio_pci_mmio, cfg.device_status):
+	case offsetof(struct virtio_pci_mmio, cfg.config_generation):
+		goto read_through8;
+	case offsetof(struct virtio_pci_mmio, notify):
+		goto read_through16;
+	case offsetof(struct virtio_pci_mmio, isr):
+		if (mask != 0xFF)
+			errx(1, "%s: non-8-bit read from offset %u (%#x)",
+			     d->name, off, getreg(eip));
+		/* Read resets the isr */
+		isr = d->mmio->isr;
+		d->mmio->isr = 0;
+		return isr;
+	case offsetof(struct virtio_pci_mmio, padding):
+		errx(1, "%s: read from padding (%#x)",
+		     d->name, getreg(eip));
+	default:
+		/* Read from device config space, beware unaligned overflow */
+		if (off > d->mmio_size - 4)
+			errx(1, "%s: read past end (%#x)",
+			     d->name, getreg(eip));
+		if (mask == 0xFFFFFFFF)
+			goto read_through32;
+		else if (mask == 0xFFFF)
+			goto read_through16;
+		else
+			goto read_through8;
+	}
+
+read_through32:
+	if (mask != 0xFFFFFFFF)
+		errx(1, "%s: non-32-bit read to offset %u (%#x)",
+		     d->name, off, getreg(eip));
+	memcpy(&val, (char *)d->mmio + off, 4);
+	return val;
+
+read_through16:
+	if (mask != 0xFFFF)
+		errx(1, "%s: non-16-bit read to offset %u (%#x)",
+		     d->name, off, getreg(eip));
+	memcpy(&val, (char *)d->mmio + off, 2);
+	return val;
+
+read_through8:
+	if (mask != 0xFF)
+		errx(1, "%s: non-8-bit read to offset %u (%#x)",
+		     d->name, off, getreg(eip));
+	memcpy(&val, (char *)d->mmio + off, 1);
+	return val;
 }
 
 static void emulate_mmio(unsigned long paddr, const u8 *insn)
@@ -1783,6 +2069,42 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
 	*i = vq;
 }
 
+static void add_pci_virtqueue(struct device *dev,
+			      void (*service)(struct virtqueue *))
+{
+	struct virtqueue **i, *vq = malloc(sizeof(*vq));
+
+	/* Initialize the virtqueue */
+	vq->next = NULL;
+	vq->last_avail_idx = 0;
+	vq->dev = dev;
+
+	/*
+	 * This is the routine the service thread will run, and its Process ID
+	 * once it's running.
+	 */
+	vq->service = service;
+	vq->thread = (pid_t)-1;
+
+	/* Initialize the configuration. */
+	vq->pci_config.queue_size = VIRTQUEUE_NUM;
+	vq->pci_config.queue_enable = 0;
+	vq->pci_config.queue_notify_off = 0;
+
+	/* Add one to the number of queues */
+	vq->dev->mmio->cfg.num_queues++;
+
+	/* FIXME: Do irq per virtqueue, not per device. */
+	vq->config.irq = vq->dev->config.irq_line;
+
+	/*
+	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
+	 * second.
+	 */
+	for (i = &dev->vq; *i; i = &(*i)->next);
+	*i = vq;
+}
+
 /*
  * The first half of the feature bitmask is for us to advertise features. The
  * second half is for the Guest to accept features.
@@ -1800,6 +2122,11 @@ static void add_feature(struct device *dev, unsigned bit)
 	features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT));
 }
 
+static void add_pci_feature(struct device *dev, unsigned bit)
+{
+	dev->features |= (1ULL << bit);
+}
+
 /*
  * This routine sets the configuration fields for an existing device's
  * descriptor. It only works for the last device, but that's OK because that's
@@ -1819,6 +2146,139 @@ static void set_config(struct device *dev, unsigned len, const void *conf)
 	assert(dev->desc->config_len == len);
 }
 
+/* For devices with no config. */
+static void no_device_config(struct device *dev)
+{
+	dev->mmio_addr = get_mmio_region(dev->mmio_size);
+
+	dev->config.bar[0] = dev->mmio_addr;
+	/* Bottom 4 bits must be zero */
+	assert(~(dev->config.bar[0] & 0xF));
+}
+
+/* This puts the device config into BAR0 */
+static void set_device_config(struct device *dev, const void *conf, size_t len)
+{
+	/* Set up BAR 0 */
+	dev->mmio_size += len;
+	dev->mmio = realloc(dev->mmio, dev->mmio_size);
+	memcpy(dev->mmio + 1, conf, len);
+
+	/* Hook up device cfg */
+	dev->config.cfg_access.cap.cap_next
+		= offsetof(struct pci_config, device);
+
+	/* Fix up device cfg field length. */
+	dev->config.device.length = len;
+
+	/* The rest is the same as the no-config case */
+	no_device_config(dev);
+}
+
+static void init_cap(struct virtio_pci_cap *cap, size_t caplen, int type,
+		     size_t bar_offset, size_t bar_bytes, u8 next)
+{
+	cap->cap_vndr = PCI_CAP_ID_VNDR;
+	cap->cap_next = next;
+	cap->cap_len = caplen;
+	cap->cfg_type = type;
+	cap->bar = 0;
+	memset(cap->padding, 0, sizeof(cap->padding));
+	cap->offset = bar_offset;
+	cap->length = bar_bytes;
+}
+
+/*
+ * This sets up the pci_config structure, as defined in the virtio 1.0
+ * standard (and PCI standard).
+ */
+static void init_pci_config(struct pci_config *pci, u16 type,
+			    u8 class, u8 subclass)
+{
+	size_t bar_offset, bar_len;
+
+	/* Save typing: most thing are happy being zero. */
+	memset(pci, 0, sizeof(*pci));
+
+	/* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */
+	pci->vendor_id = 0x1AF4;
+	/* 4.1.2.1: ... PCI Device ID calculated by adding 0x1040 ... */
+	pci->device_id = 0x1040 + type;
+
+	/*
+	 * PCI have specific codes for different types of devices.
+	 * Linux doesn't care, but it's a good clue for people looking
+	 * at the device.
+	 *
+	 * eg :
+	 * VIRTIO_ID_CONSOLE: class = 0x07, subclass = 0x00
+	 * VIRTIO_ID_NET: class = 0x02, subclass = 0x00
+	 * VIRTIO_ID_BLOCK: class = 0x01, subclass = 0x80
+	 * VIRTIO_ID_RNG: class = 0xff, subclass = 0
+	 */
+	pci->class = class;
+	pci->subclass = subclass;
+
+	/*
+	 * 4.1.2.1 Non-transitional devices SHOULD have a PCI Revision
+	 * ID of 1 or higher
+	 */
+	pci->revid = 1;
+
+	/*
+	 * 4.1.2.1 Non-transitional devices SHOULD have a PCI
+	 * Subsystem Device ID of 0x40 or higher.
+	 */
+	pci->subsystem_device_id = 0x40;
+
+	/* We use our dummy interrupt controller, and irq_line is the irq */
+	pci->irq_line = devices.next_irq++;
+	pci->irq_pin = 0;
+
+	/* Support for extended capabilities. */
+	pci->status = (1 << 4);
+
+	/* Link them in. */
+	pci->capabilities = offsetof(struct pci_config, common);
+
+	bar_offset = offsetof(struct virtio_pci_mmio, cfg);
+	bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg);
+	init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG,
+		 bar_offset, bar_len,
+		 offsetof(struct pci_config, notify));
+
+	bar_offset += bar_len;
+	bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify);
+	/* FIXME: Use a non-zero notify_off, for per-queue notification? */
+	init_cap(&pci->notify.cap, sizeof(pci->notify),
+		 VIRTIO_PCI_CAP_NOTIFY_CFG,
+		 bar_offset, bar_len,
+		 offsetof(struct pci_config, isr));
+
+	bar_offset += bar_len;
+	bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr);
+	init_cap(&pci->isr, sizeof(pci->isr),
+		 VIRTIO_PCI_CAP_ISR_CFG,
+		 bar_offset, bar_len,
+		 offsetof(struct pci_config, cfg_access));
+
+	/* This doesn't have any presence in the BAR */
+	init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access),
+		 VIRTIO_PCI_CAP_PCI_CFG,
+		 0, 0, 0);
+
+	bar_offset += bar_len + sizeof(((struct virtio_pci_mmio *)0)->padding);
+	assert(bar_offset == sizeof(struct virtio_pci_mmio));
+
+	/*
+	 * This gets sewn in and length set in set_device_config().
+	 * Some devices don't have a device configuration interface, so
+	 * we never expose this if we don't call set_device_config().
+	 */
+	init_cap(&pci->device, sizeof(pci->device), VIRTIO_PCI_CAP_DEVICE_CFG,
+		 bar_offset, 0, 0);
+}
+
 /*
  * This routine does all the creation and setup of a new device, including
  * calling new_dev_desc() to allocate the descriptor and device memory. We
@@ -1854,6 +2314,34 @@ static struct device *new_device(const char *name, u16 type)
 	return dev;
 }
 
+static struct device *new_pci_device(const char *name, u16 type,
+				     u8 class, u8 subclass)
+{
+	struct device *dev = malloc(sizeof(*dev));
+
+	/* Now we populate the fields one at a time. */
+	dev->desc = NULL;
+	dev->name = name;
+	dev->vq = NULL;
+	dev->feature_len = 0;
+	dev->num_vq = 0;
+	dev->running = false;
+	dev->next = NULL;
+	dev->mmio_size = sizeof(struct virtio_pci_mmio);
+	dev->mmio = calloc(1, dev->mmio_size);
+	dev->features = (u64)1 << VIRTIO_F_VERSION_1;
+	dev->features_accepted = 0;
+
+	if (devices.device_num + 1 >= 32)
+		errx(1, "Can only handle 31 PCI devices");
+
+	init_pci_config(&dev->config, type, class, subclass);
+	assert(!devices.pci[devices.device_num+1]);
+	devices.pci[++devices.device_num] = dev;
+
+	return dev;
+}
+
 /*
  * Our first setup routine is the console. It's a fairly simple device, but
  * UNIX tty handling makes it uglier than it could be.