author    Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:55:21 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:55:21 -0400
commit    bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree      216fdc1cbef450ca688135c5b8969169482d9a48 /drivers/virtio
parent    3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent    657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
crypto/async_tx/async_xor.c
drivers/dma/ioat/dma_v2.h
drivers/dma/ioat/pci.c
drivers/md/raid5.c
Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/virtio.c          |  29
-rw-r--r--  drivers/virtio/virtio_balloon.c  |  27
-rw-r--r--  drivers/virtio/virtio_pci.c      | 307
-rw-r--r--  drivers/virtio/virtio_ring.c     | 102
4 files changed, 396 insertions, 69 deletions
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 018c070a357f..3a43ebf83a49 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -31,21 +31,37 @@ static ssize_t modalias_show(struct device *_d,
 	return sprintf(buf, "virtio:d%08Xv%08X\n",
 		       dev->id.device, dev->id.vendor);
 }
+static ssize_t features_show(struct device *_d,
+			     struct device_attribute *attr, char *buf)
+{
+	struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+	unsigned int i;
+	ssize_t len = 0;
+
+	/* We actually represent this as a bitstring, as it could be
+	 * arbitrary length in future. */
+	for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++)
+		len += sprintf(buf+len, "%c",
+			       test_bit(i, dev->features) ? '1' : '0');
+	len += sprintf(buf+len, "\n");
+	return len;
+}
 static struct device_attribute virtio_dev_attrs[] = {
 	__ATTR_RO(device),
 	__ATTR_RO(vendor),
 	__ATTR_RO(status),
 	__ATTR_RO(modalias),
+	__ATTR_RO(features),
 	__ATTR_NULL
 };
 
 static inline int virtio_id_match(const struct virtio_device *dev,
 				  const struct virtio_device_id *id)
 {
-	if (id->device != dev->id.device)
+	if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
 		return 0;
 
-	return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor != dev->id.vendor;
+	return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
 }
 
 /* This looks through all the IDs a driver claims to support.  If any of them
@@ -118,13 +134,14 @@ static int virtio_dev_probe(struct device *_d)
 		if (device_features & (1 << i))
 			set_bit(i, dev->features);
 
+	dev->config->finalize_features(dev);
+
 	err = drv->probe(dev);
 	if (err)
 		add_status(dev, VIRTIO_CONFIG_S_FAILED);
-	else {
-		dev->config->finalize_features(dev);
+	else
 		add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
-	}
+
 	return err;
 }
 
@@ -185,6 +202,8 @@ int register_virtio_device(struct virtio_device *dev)
 	/* Acknowledge that we've seen the device. */
 	add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
+	INIT_LIST_HEAD(&dev->vqs);
+
 	/* device_register() causes the bus infrastructure to look for a
 	 * matching driver. */
 	err = device_register(&dev->dev);
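
Two real bugs fall out of the virtio_id_match() hunk above: a driver id-table entry with device == VIRTIO_DEV_ANY_ID previously could never match, and the vendor comparison used != where == was meant, so exact vendor matches were rejected. The same diff also moves finalize_features() ahead of drv->probe(), so a driver sees the final negotiated feature bits for the whole of its probe routine. As a reviewer aid, here is a minimal standalone sketch of the corrected match logic (not part of the commit; the structs and the VIRTIO_DEV_ANY_ID value are simplified from the kernel headers):

#include <stdio.h>

#define VIRTIO_DEV_ANY_ID 0xffffffff

struct id { unsigned int device, vendor; };

/* Mirrors the fixed virtio_id_match(): the table entry may wildcard
 * either field, and an exact vendor match now succeeds. */
static int virtio_id_match(struct id dev, struct id tbl)
{
	if (tbl.device != dev.device && tbl.device != VIRTIO_DEV_ANY_ID)
		return 0;
	return tbl.vendor == VIRTIO_DEV_ANY_ID || tbl.vendor == dev.vendor;
}

int main(void)
{
	struct id net   = { .device = 1, .vendor = 0x1af4 };
	struct id exact = { .device = 1, .vendor = 0x1af4 };
	struct id any   = { .device = VIRTIO_DEV_ANY_ID,
			    .vendor = VIRTIO_DEV_ANY_ID };
	struct id other = { .device = 2, .vendor = 0x1af4 };

	printf("%d %d %d\n", virtio_id_match(net, exact),
	       virtio_id_match(net, any),
	       virtio_id_match(net, other));	/* prints: 1 1 0 */
	return 0;
}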
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9c76a061a04d..26b278264796 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -204,6 +204,9 @@ static int balloon(void *_vballoon)
 static int virtballoon_probe(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb;
+	struct virtqueue *vqs[2];
+	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
+	const char *names[] = { "inflate", "deflate" };
 	int err;
 
 	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@@ -218,22 +221,17 @@ static int virtballoon_probe(struct virtio_device *vdev)
 	vb->vdev = vdev;
 
 	/* We expect two virtqueues. */
-	vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack);
-	if (IS_ERR(vb->inflate_vq)) {
-		err = PTR_ERR(vb->inflate_vq);
+	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+	if (err)
 		goto out_free_vb;
-	}
 
-	vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack);
-	if (IS_ERR(vb->deflate_vq)) {
-		err = PTR_ERR(vb->deflate_vq);
-		goto out_del_inflate_vq;
-	}
+	vb->inflate_vq = vqs[0];
+	vb->deflate_vq = vqs[1];
 
 	vb->thread = kthread_run(balloon, vb, "vballoon");
 	if (IS_ERR(vb->thread)) {
 		err = PTR_ERR(vb->thread);
-		goto out_del_deflate_vq;
+		goto out_del_vqs;
 	}
 
 	vb->tell_host_first
@@ -241,10 +239,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
 
 	return 0;
 
-out_del_deflate_vq:
-	vdev->config->del_vq(vb->deflate_vq);
-out_del_inflate_vq:
-	vdev->config->del_vq(vb->inflate_vq);
+out_del_vqs:
+	vdev->config->del_vqs(vdev);
 out_free_vb:
 	kfree(vb);
 out:
@@ -264,8 +260,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
 	/* Now we reset the device so we can clean up the queues. */
 	vdev->config->reset(vdev);
 
-	vdev->config->del_vq(vb->deflate_vq);
-	vdev->config->del_vq(vb->inflate_vq);
+	vdev->config->del_vqs(vdev);
 	kfree(vb);
 }
 
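
The balloon conversion above is the template for every find_vq() caller: build parallel arrays of queues, callbacks, and names, make a single find_vqs() call, and collapse the per-queue unwind labels into one del_vqs(). A hypothetical two-queue driver would follow the same shape (foo_*, the queue names, and the file-scope pointers are illustrative only, not from this tree):

/* Hypothetical driver fragment showing the find_vqs() calling pattern. */
static struct virtqueue *in_vq, *out_vq;

static void foo_recv(struct virtqueue *vq) { /* consume used buffers */ }
static void foo_xmit(struct virtqueue *vq) { /* reclaim sent buffers */ }

static int foo_probe(struct virtio_device *vdev)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { foo_recv, foo_xmit };
	const char *names[] = { "foo-input", "foo-output" };
	int err;

	/* One call allocates every queue; on virtio_pci this also lets
	 * the transport size its MSI-X vector request up front. */
	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
	if (err)
		return err;

	/* Queues come back in the order they were requested. */
	in_vq  = vqs[0];
	out_vq = vqs[1];
	return 0;
}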
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 330aacbdec1f..193c8f0e5cc5 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -42,6 +42,26 @@ struct virtio_pci_device
 	/* a list of queues so we can dispatch IRQs */
 	spinlock_t lock;
 	struct list_head virtqueues;
+
+	/* MSI-X support */
+	int msix_enabled;
+	int intx_enabled;
+	struct msix_entry *msix_entries;
+	/* Name strings for interrupts. This size should be enough,
+	 * and I'm too lazy to allocate each name separately. */
+	char (*msix_names)[256];
+	/* Number of available vectors */
+	unsigned msix_vectors;
+	/* Vectors allocated */
+	unsigned msix_used_vectors;
+};
+
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues Thus, we need at least 2 vectors for MSI. */
+enum {
+	VP_MSIX_CONFIG_VECTOR = 0,
+	VP_MSIX_VQ_VECTOR = 1,
 };
 
 struct virtio_pci_vq_info
@@ -60,6 +80,9 @@ struct virtio_pci_vq_info
 
 	/* the list node for the virtqueues list */
 	struct list_head node;
+
+	/* MSI-X vector (or none) */
+	unsigned vector;
 };
 
 /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@@ -109,7 +132,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
 		   void *buf, unsigned len)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+	void __iomem *ioaddr = vp_dev->ioaddr +
+				VIRTIO_PCI_CONFIG(vp_dev) + offset;
 	u8 *ptr = buf;
 	int i;
 
@@ -123,7 +147,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
 		   const void *buf, unsigned len)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+	void __iomem *ioaddr = vp_dev->ioaddr +
+				VIRTIO_PCI_CONFIG(vp_dev) + offset;
 	const u8 *ptr = buf;
 	int i;
 
@@ -164,6 +189,37 @@ static void vp_notify(struct virtqueue *vq)
 	iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
 }
 
+/* Handle a configuration change: Tell driver if it wants to know. */
+static irqreturn_t vp_config_changed(int irq, void *opaque)
+{
+	struct virtio_pci_device *vp_dev = opaque;
+	struct virtio_driver *drv;
+	drv = container_of(vp_dev->vdev.dev.driver,
+			   struct virtio_driver, driver);
+
+	if (drv && drv->config_changed)
+		drv->config_changed(&vp_dev->vdev);
+	return IRQ_HANDLED;
+}
+
+/* Notify all virtqueues on an interrupt. */
+static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
+{
+	struct virtio_pci_device *vp_dev = opaque;
+	struct virtio_pci_vq_info *info;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_for_each_entry(info, &vp_dev->virtqueues, node) {
+		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+	return ret;
+}
+
 /* A small wrapper to also acknowledge the interrupt when it's handled.
  * I really need an EIO hook for the vring so I can ack the interrupt once we
  * know that we'll be handling the IRQ but before we invoke the callback since
@@ -173,9 +229,6 @@ static void vp_notify(struct virtqueue *vq)
 static irqreturn_t vp_interrupt(int irq, void *opaque)
 {
 	struct virtio_pci_device *vp_dev = opaque;
-	struct virtio_pci_vq_info *info;
-	irqreturn_t ret = IRQ_NONE;
-	unsigned long flags;
 	u8 isr;
 
 	/* reading the ISR has the effect of also clearing it so it's very
@@ -187,34 +240,137 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 		return IRQ_NONE;
 
 	/* Configuration change?  Tell driver if it wants to know. */
-	if (isr & VIRTIO_PCI_ISR_CONFIG) {
-		struct virtio_driver *drv;
-		drv = container_of(vp_dev->vdev.dev.driver,
-				   struct virtio_driver, driver);
+	if (isr & VIRTIO_PCI_ISR_CONFIG)
+		vp_config_changed(irq, opaque);
 
-		if (drv && drv->config_changed)
-			drv->config_changed(&vp_dev->vdev);
+	return vp_vring_interrupt(irq, opaque);
+}
+
+static void vp_free_vectors(struct virtio_device *vdev)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	int i;
+
+	if (vp_dev->intx_enabled) {
+		free_irq(vp_dev->pci_dev->irq, vp_dev);
+		vp_dev->intx_enabled = 0;
 	}
 
-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_for_each_entry(info, &vp_dev->virtqueues, node) {
-		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
-			ret = IRQ_HANDLED;
+	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+		free_irq(vp_dev->msix_entries[i].vector, vp_dev);
+	vp_dev->msix_used_vectors = 0;
+
+	if (vp_dev->msix_enabled) {
+		/* Disable the vector used for configuration */
+		iowrite16(VIRTIO_MSI_NO_VECTOR,
+			  vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+		/* Flush the write out to device */
+		ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+
+		vp_dev->msix_enabled = 0;
+		pci_disable_msix(vp_dev->pci_dev);
 	}
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
+}
 
-	return ret;
+static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
+			  int *options, int noptions)
+{
+	int i;
+	for (i = 0; i < noptions; ++i)
+		if (!pci_enable_msix(dev, entries, options[i]))
+			return options[i];
+	return -EBUSY;
+}
+
+static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	const char *name = dev_name(&vp_dev->vdev.dev);
+	unsigned i, v;
+	int err = -ENOMEM;
+	/* We want at most one vector per queue and one for config changes.
+	 * Fallback to separate vectors for config and a shared for queues.
+	 * Finally fall back to regular interrupts. */
+	int options[] = { max_vqs + 1, 2 };
+	int nvectors = max(options[0], options[1]);
+
+	vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
+				       GFP_KERNEL);
+	if (!vp_dev->msix_entries)
+		goto error_entries;
+	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+				     GFP_KERNEL);
+	if (!vp_dev->msix_names)
+		goto error_names;
+
+	for (i = 0; i < nvectors; ++i)
+		vp_dev->msix_entries[i].entry = i;
+
+	err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
+			     options, ARRAY_SIZE(options));
+	if (err < 0) {
+		/* Can't allocate enough MSI-X vectors, use regular interrupt */
+		vp_dev->msix_vectors = 0;
+		err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
+				  IRQF_SHARED, name, vp_dev);
+		if (err)
+			goto error_irq;
+		vp_dev->intx_enabled = 1;
+	} else {
+		vp_dev->msix_vectors = err;
+		vp_dev->msix_enabled = 1;
+
+		/* Set the vector used for configuration */
+		v = vp_dev->msix_used_vectors;
+		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+			 "%s-config", name);
+		err = request_irq(vp_dev->msix_entries[v].vector,
+				  vp_config_changed, 0, vp_dev->msix_names[v],
+				  vp_dev);
+		if (err)
+			goto error_irq;
+		++vp_dev->msix_used_vectors;
+
+		iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+		/* Verify we had enough resources to assign the vector */
+		v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+		if (v == VIRTIO_MSI_NO_VECTOR) {
+			err = -EBUSY;
+			goto error_irq;
+		}
+	}
+
+	if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) {
+		/* Shared vector for all VQs */
+		v = vp_dev->msix_used_vectors;
+		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+			 "%s-virtqueues", name);
+		err = request_irq(vp_dev->msix_entries[v].vector,
+				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
+				  vp_dev);
+		if (err)
+			goto error_irq;
+		++vp_dev->msix_used_vectors;
+	}
+	return 0;
+error_irq:
+	vp_free_vectors(vdev);
+	kfree(vp_dev->msix_names);
+error_names:
+	kfree(vp_dev->msix_entries);
+error_entries:
+	return err;
 }
 
-/* the config->find_vq() implementation */
 static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
-				    void (*callback)(struct virtqueue *vq))
+				    void (*callback)(struct virtqueue *vq),
+				    const char *name)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	struct virtio_pci_vq_info *info;
 	struct virtqueue *vq;
 	unsigned long flags, size;
-	u16 num;
+	u16 num, vector;
 	int err;
 
 	/* Select the queue we're interested in */
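
vp_request_vectors() above encodes a three-step fallback: ask for max_vqs + 1 MSI-X vectors (one per queue plus one for configuration changes), retry with 2 (configuration plus a single vector shared by every ring), and finally give up on MSI-X and register vp_interrupt() on the legacy INTx pin. A standalone model of that arithmetic, where try_enable() merely stands in for pci_enable_msix() (it is not a kernel API):

#include <stdio.h>

/* Succeeds (returns 0, like pci_enable_msix) only if enough vectors exist. */
static int try_enable(int avail, int want)
{
	return want <= avail ? 0 : -1;
}

static int pick_vectors(int avail, int max_vqs)
{
	int options[] = { max_vqs + 1, 2 };	/* per-VQ first, then shared */
	int i;

	for (i = 0; i < 2; ++i)
		if (!try_enable(avail, options[i]))
			return options[i];
	return 0;	/* fall back to the shared INTx handler */
}

int main(void)
{
	/* A device with 3 virtqueues: 4 vectors if available, else 2,
	 * else INTx. */
	printf("%d %d %d\n", pick_vectors(8, 3), pick_vectors(2, 3),
	       pick_vectors(1, 3));	/* prints: 4 2 0 */
	return 0;
}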
@@ -233,6 +389,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 
 	info->queue_index = index;
 	info->num = num;
+	info->vector = VIRTIO_MSI_NO_VECTOR;
 
 	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
 	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -247,7 +404,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 
 	/* create the vring */
 	vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
-				 vdev, info->queue, vp_notify, callback);
+				 vdev, info->queue, vp_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
 		goto out_activate_queue;
@@ -256,12 +413,43 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 	vq->priv = info;
 	info->vq = vq;
 
+	/* allocate per-vq vector if available and necessary */
+	if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) {
+		vector = vp_dev->msix_used_vectors;
+		snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
+			 "%s-%s", dev_name(&vp_dev->vdev.dev), name);
+		err = request_irq(vp_dev->msix_entries[vector].vector,
+				  vring_interrupt, 0,
+				  vp_dev->msix_names[vector], vq);
+		if (err)
+			goto out_request_irq;
+		info->vector = vector;
+		++vp_dev->msix_used_vectors;
+	} else
+		vector = VP_MSIX_VQ_VECTOR;
+
+	if (callback && vp_dev->msix_enabled) {
+		iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+		vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+		if (vector == VIRTIO_MSI_NO_VECTOR) {
+			err = -EBUSY;
+			goto out_assign;
+		}
+	}
+
 	spin_lock_irqsave(&vp_dev->lock, flags);
 	list_add(&info->node, &vp_dev->virtqueues);
 	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return vq;
 
+out_assign:
+	if (info->vector != VIRTIO_MSI_NO_VECTOR) {
+		free_irq(vp_dev->msix_entries[info->vector].vector, vq);
+		--vp_dev->msix_used_vectors;
+	}
+out_request_irq:
+	vring_del_virtqueue(vq);
 out_activate_queue:
 	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 	free_pages_exact(info->queue, size);
@@ -270,21 +458,27 @@ out_info:
 	return ERR_PTR(err);
 }
 
-/* the config->del_vq() implementation */
 static void vp_del_vq(struct virtqueue *vq)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 	struct virtio_pci_vq_info *info = vq->priv;
-	unsigned long flags, size;
+	unsigned long size;
 
-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_del(&info->node);
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
+	iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+
+	if (info->vector != VIRTIO_MSI_NO_VECTOR)
+		free_irq(vp_dev->msix_entries[info->vector].vector, vq);
+
+	if (vp_dev->msix_enabled) {
+		iowrite16(VIRTIO_MSI_NO_VECTOR,
+			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+		/* Flush the write out to device */
+		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
+	}
 
 	vring_del_virtqueue(vq);
 
 	/* Select and deactivate the queue */
-	iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 
 	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
@@ -292,14 +486,57 @@ static void vp_del_vq(struct virtqueue *vq)
 	kfree(info);
 }
 
+/* the config->del_vqs() implementation */
+static void vp_del_vqs(struct virtio_device *vdev)
+{
+	struct virtqueue *vq, *n;
+
+	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+		vp_del_vq(vq);
+
+	vp_free_vectors(vdev);
+}
+
+/* the config->find_vqs() implementation */
+static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+		       struct virtqueue *vqs[],
+		       vq_callback_t *callbacks[],
+		       const char *names[])
+{
+	int vectors = 0;
+	int i, err;
+
+	/* How many vectors would we like? */
+	for (i = 0; i < nvqs; ++i)
+		if (callbacks[i])
+			++vectors;
+
+	err = vp_request_vectors(vdev, vectors);
+	if (err)
+		goto error_request;
+
+	for (i = 0; i < nvqs; ++i) {
+		vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]);
+		if (IS_ERR(vqs[i]))
+			goto error_find;
+	}
+	return 0;
+
+error_find:
+	vp_del_vqs(vdev);
+
+error_request:
+	return PTR_ERR(vqs[i]);
+}
+
 static struct virtio_config_ops virtio_pci_config_ops = {
 	.get		= vp_get,
 	.set		= vp_set,
 	.get_status	= vp_get_status,
 	.set_status	= vp_set_status,
 	.reset		= vp_reset,
-	.find_vq	= vp_find_vq,
-	.del_vq		= vp_del_vq,
+	.find_vqs	= vp_find_vqs,
+	.del_vqs	= vp_del_vqs,
 	.get_features	= vp_get_features,
 	.finalize_features = vp_finalize_features,
 };
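
Note how vp_del_vqs() walks every queue without the transport keeping a registry of its own: register_virtio_device() now initialises dev->vqs (the virtio.c hunk above) and vring_new_virtqueue() links each queue into that list (the virtio_ring.c hunk below). A user-space model of the ownership pattern, with a plain singly-linked list standing in for the kernel's list_head (the kernel appends with list_add_tail; this sketch prepends for brevity):

#include <stdio.h>
#include <stdlib.h>

struct vq {
	const char *name;
	struct vq *next;	/* stand-in for the embedded list_head */
};

struct vdev { struct vq *vqs; };

/* vring_new_virtqueue(): the queue links itself into the device list. */
static struct vq *new_vq(struct vdev *d, const char *name)
{
	struct vq *vq = malloc(sizeof(*vq));
	vq->name = name;
	vq->next = d->vqs;
	d->vqs = vq;
	return vq;
}

/* vp_del_vqs(): tear down whatever the device list contains. */
static void del_vqs(struct vdev *d)
{
	while (d->vqs) {
		struct vq *vq = d->vqs;
		d->vqs = vq->next;
		printf("deleting %s\n", vq->name);
		free(vq);
	}
}

int main(void)
{
	struct vdev d = { .vqs = NULL };
	new_vq(&d, "inflate");
	new_vq(&d, "deflate");
	del_vqs(&d);	/* prints: deleting deflate, deleting inflate */
	return 0;
}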
@@ -310,7 +547,7 @@ static void virtio_pci_release_dev(struct device *_d)
 	struct virtio_pci_device *vp_dev = to_vp_device(dev);
 	struct pci_dev *pci_dev = vp_dev->pci_dev;
 
-	free_irq(pci_dev->irq, vp_dev);
+	vp_del_vqs(dev);
 	pci_set_drvdata(pci_dev, NULL);
 	pci_iounmap(pci_dev, vp_dev->ioaddr);
 	pci_release_regions(pci_dev);
@@ -369,21 +606,13 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
 	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
 	vp_dev->vdev.id.device = pci_dev->subsystem_device;
 
-	/* register a handler for the queue with the PCI device's interrupt */
-	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
-			  dev_name(&vp_dev->vdev.dev), vp_dev);
-	if (err)
-		goto out_set_drvdata;
-
 	/* finally register the virtio device */
 	err = register_virtio_device(&vp_dev->vdev);
 	if (err)
-		goto out_req_irq;
+		goto out_set_drvdata;
 
 	return 0;
 
-out_req_irq:
-	free_irq(pci_dev->irq, vp_dev);
 out_set_drvdata:
 	pci_set_drvdata(pci_dev, NULL);
 	pci_iounmap(pci_dev, vp_dev->ioaddr);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c52369ab9bb..a882f2606515 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,21 +23,30 @@
 
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
-#define BAD_RING(_vq, fmt...) \
-	do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0)
+#define BAD_RING(_vq, fmt, args...)				\
+	do {							\
+		dev_err(&(_vq)->vq.vdev->dev,			\
+			"%s:"fmt, (_vq)->vq.name, ##args);	\
+		BUG();						\
+	} while (0)
 /* Caller is supposed to guarantee no reentry. */
 #define START_USE(_vq) \
 	do { \
 		if ((_vq)->in_use) \
-			panic("in_use = %i\n", (_vq)->in_use); \
+			panic("%s:in_use = %i\n", \
+			      (_vq)->vq.name, (_vq)->in_use); \
 		(_vq)->in_use = __LINE__; \
 		mb(); \
-	} while(0)
+	} while (0)
 #define END_USE(_vq) \
 	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
 #else
-#define BAD_RING(_vq, fmt...) \
-	do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0)
+#define BAD_RING(_vq, fmt, args...)				\
+	do {							\
+		dev_err(&_vq->vq.vdev->dev,			\
+			"%s:"fmt, (_vq)->vq.name, ##args);	\
+		(_vq)->broken = true;				\
+	} while (0)
 #define START_USE(vq)
 #define END_USE(vq)
 #endif
@@ -52,6 +61,9 @@ struct vring_virtqueue
 	/* Other side has made a mess, don't try any more. */
 	bool broken;
 
+	/* Host supports indirect buffers */
+	bool indirect;
+
 	/* Number of free buffers */
 	unsigned int num_free;
 	/* Head of free buffer list. */
@@ -76,6 +88,55 @@ struct vring_virtqueue
 
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
 
+/* Set up an indirect table of descriptors and add it to the queue. */
+static int vring_add_indirect(struct vring_virtqueue *vq,
+			      struct scatterlist sg[],
+			      unsigned int out,
+			      unsigned int in)
+{
+	struct vring_desc *desc;
+	unsigned head;
+	int i;
+
+	desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
+	if (!desc)
+		return vq->vring.num;
+
+	/* Transfer entries from the sg list into the indirect page */
+	for (i = 0; i < out; i++) {
+		desc[i].flags = VRING_DESC_F_NEXT;
+		desc[i].addr = sg_phys(sg);
+		desc[i].len = sg->length;
+		desc[i].next = i+1;
+		sg++;
+	}
+	for (; i < (out + in); i++) {
+		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
+		desc[i].addr = sg_phys(sg);
+		desc[i].len = sg->length;
+		desc[i].next = i+1;
+		sg++;
+	}
+
+	/* Last one doesn't continue. */
+	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
+	desc[i-1].next = 0;
+
+	/* We're about to use a buffer */
+	vq->num_free--;
+
+	/* Use a single buffer which doesn't continue */
+	head = vq->free_head;
+	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
+	vq->vring.desc[head].addr = virt_to_phys(desc);
+	vq->vring.desc[head].len = i * sizeof(struct vring_desc);
+
+	/* Update free pointer */
+	vq->free_head = vq->vring.desc[head].next;
+
+	return head;
+}
+
 static int vring_add_buf(struct virtqueue *_vq,
 			 struct scatterlist sg[],
 			 unsigned int out,
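
The payoff of vring_add_indirect() is ring-slot economy: a request whose scatterlist has out + in entries consumes a single descriptor in the ring proper, pointing at a kmalloc'ed side table, so the number of requests in flight is no longer bounded by ring size divided by segments per request. A back-of-the-envelope model of slots consumed per request (the threshold mirrors the vring_add_buf() test shown below):

#include <stdio.h>

static unsigned slots_used(unsigned out, unsigned in, int indirect)
{
	/* vring_add_buf(): go indirect whenever the feature was
	 * negotiated and there is more than one buffer. */
	if (indirect && out + in > 1)
		return 1;	/* one slot pointing at the side table */
	return out + in;
}

int main(void)
{
	/* A block write: 1 request header + 16 data segments + 1 status. */
	printf("direct:   %u slots\n", slots_used(17, 1, 0));	/* 18 */
	printf("indirect: %u slots\n", slots_used(17, 1, 1));	/*  1 */
	return 0;
}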
@@ -85,12 +146,21 @@ static int vring_add_buf(struct virtqueue *_vq,
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	unsigned int i, avail, head, uninitialized_var(prev);
 
+	START_USE(vq);
+
 	BUG_ON(data == NULL);
+
+	/* If the host supports indirect descriptor tables, and we have multiple
+	 * buffers, then go indirect. FIXME: tune this threshold */
+	if (vq->indirect && (out + in) > 1 && vq->num_free) {
+		head = vring_add_indirect(vq, sg, out, in);
+		if (head != vq->vring.num)
+			goto add_head;
+	}
+
 	BUG_ON(out + in > vq->vring.num);
 	BUG_ON(out + in == 0);
 
-	START_USE(vq);
-
 	if (vq->num_free < out + in) {
 		pr_debug("Can't add buf len %i - avail = %i\n",
 			 out + in, vq->num_free);
@@ -127,6 +197,7 @@ static int vring_add_buf(struct virtqueue *_vq,
 	/* Update free pointer */
 	vq->free_head = i;
 
+add_head:
 	/* Set token. */
 	vq->data[head] = data;
 
@@ -170,6 +241,11 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 
 	/* Put back on free list: find end */
 	i = head;
+
+	/* Free the indirect table */
+	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
+		kfree(phys_to_virt(vq->vring.desc[i].addr));
+
 	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
 		i = vq->vring.desc[i].next;
 		vq->num_free++;
@@ -284,7 +360,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 				      struct virtio_device *vdev,
 				      void *pages,
 				      void (*notify)(struct virtqueue *),
-				      void (*callback)(struct virtqueue *))
+				      void (*callback)(struct virtqueue *),
+				      const char *name)
 {
 	struct vring_virtqueue *vq;
 	unsigned int i;
@@ -303,14 +380,18 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
 	vq->vq.vq_ops = &vring_vq_ops;
+	vq->vq.name = name;
 	vq->notify = notify;
 	vq->broken = false;
 	vq->last_used_idx = 0;
 	vq->num_added = 0;
+	list_add_tail(&vq->vq.list, &vdev->vqs);
 #ifdef DEBUG
 	vq->in_use = false;
 #endif
 
+	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
+
 	/* No callback?  Tell other side not to bother us. */
 	if (!callback)
 		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
@@ -327,6 +408,7 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue);
 
 void vring_del_virtqueue(struct virtqueue *vq)
 {
+	list_del(&vq->list);
 	kfree(to_vvq(vq));
 }
 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -338,6 +420,8 @@ void vring_transport_features(struct virtio_device *vdev)
 
 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
 		switch (i) {
+		case VIRTIO_RING_F_INDIRECT_DESC:
+			break;
 		default:
 			/* We don't understand this bit. */
 			clear_bit(i, vdev->features);