author     Linus Torvalds <torvalds@linux-foundation.org>   2012-12-20 11:37:04 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-12-20 11:37:05 -0500
commit     b7dfde956daee23f4439d0c8562a5e38b43e79d9
tree       2ed71fb5c5eac6957fd1e1ad0a67be6c3282167a /drivers/virtio
parent     03c850ec327c42a97e44c448b75983e12da417d9
parent     1b6370463e88b0c1c317de16d7b962acc1dab4f2
Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio update from Rusty Russell:
"Some nice cleanups, and even a patch my wife did as a "live" demo for
Latinoware 2012.
There's a slightly non-trivial merge in virtio-net, as we cleaned up
the virtio add_buf interface while DaveM accepted the mq virtio-net
patches."
* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (27 commits)
virtio_console: Add support for remoteproc serial
virtio_console: Merge struct buffer_token into struct port_buffer
virtio: add drv_to_virtio to make code clearly
virtio: use dev_to_virtio wrapper in virtio
virtio-mmio: Fix irq parsing in command line parameter
virtio_console: Free buffers from out-queue upon close
virtio: Convert dev_printk(KERN_<LEVEL> to dev_<level>(
virtio_console: Use kmalloc instead of kzalloc
virtio_console: Free buffer if splice fails
virtio: tools: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: scsi: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: rpmsg: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: net: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: console: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: make virtqueue_add_buf() returning 0 on success, not capacity.
virtio: console: don't rely on virtqueue_add_buf() returning capacity.
virtio_net: don't rely on virtqueue_add_buf() returning capacity.
virtio-net: remove unused skb_vnet_hdr->num_sg field
virtio-net: correct capacity math on ring full
virtio: move queue_index and num_free fields into core struct virtqueue.
...
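The central API change in this series is that virtqueue_add_buf() now returns 0 on success instead of the ring's remaining capacity. A minimal, hypothetical caller sketch of the new convention (buffer, length and error handling are illustrative, not taken from any driver in this pull):

	struct scatterlist sg;

	sg_init_one(&sg, buf, len);

	/* 0 on success; -ENOSPC or -ENOMEM on failure, no capacity hint. */
	if (virtqueue_add_buf(vq, &sg, 1, 0, buf, GFP_ATOMIC) < 0)
		return -ENOSPC;

	virtqueue_kick(vq);

Drivers that previously used the positive return value to track free space now read vq->num_free directly, since that field moves into struct virtqueue later in this series.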
Diffstat (limited to 'drivers/virtio')
 drivers/virtio/virtio.c         | 30
 drivers/virtio/virtio_balloon.c |  7
 drivers/virtio/virtio_mmio.c    | 30
 drivers/virtio/virtio_pci.c     | 20
 drivers/virtio/virtio_ring.c    | 46
 5 files changed, 57 insertions(+), 76 deletions(-)
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 809b0de59c09..ee59b74768d9 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -10,33 +10,32 @@ static DEFINE_IDA(virtio_index_ida);
 static ssize_t device_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_d);
 	return sprintf(buf, "0x%04x\n", dev->id.device);
 }
 static ssize_t vendor_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_d);
 	return sprintf(buf, "0x%04x\n", dev->id.vendor);
 }
 static ssize_t status_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_d);
 	return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
 }
 static ssize_t modalias_show(struct device *_d,
 			     struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-
+	struct virtio_device *dev = dev_to_virtio(_d);
 	return sprintf(buf, "virtio:d%08Xv%08X\n",
 		       dev->id.device, dev->id.vendor);
 }
 static ssize_t features_show(struct device *_d,
 			     struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+	struct virtio_device *dev = dev_to_virtio(_d);
 	unsigned int i;
 	ssize_t len = 0;
 
@@ -71,10 +70,10 @@ static inline int virtio_id_match(const struct virtio_device *dev,
 static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
 {
 	unsigned int i;
-	struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_dv);
 	const struct virtio_device_id *ids;
 
-	ids = container_of(_dr, struct virtio_driver, driver)->id_table;
+	ids = drv_to_virtio(_dr)->id_table;
 	for (i = 0; ids[i].device; i++)
 		if (virtio_id_match(dev, &ids[i]))
 			return 1;
@@ -83,7 +82,7 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
 
 static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
 {
-	struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_dv);
 
 	return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
 			      dev->id.device, dev->id.vendor);
@@ -98,8 +97,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
 					 unsigned int fbit)
 {
 	unsigned int i;
-	struct virtio_driver *drv = container_of(vdev->dev.driver,
-						 struct virtio_driver, driver);
+	struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
 
 	for (i = 0; i < drv->feature_table_size; i++)
 		if (drv->feature_table[i] == fbit)
@@ -111,9 +109,8 @@ EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
 static int virtio_dev_probe(struct device *_d)
 {
 	int err, i;
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-	struct virtio_driver *drv = container_of(dev->dev.driver,
-						 struct virtio_driver, driver);
+	struct virtio_device *dev = dev_to_virtio(_d);
+	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
 	u32 device_features;
 
 	/* We have a driver! */
@@ -152,9 +149,8 @@ static int virtio_dev_probe(struct device *_d)
 
 static int virtio_dev_remove(struct device *_d)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-	struct virtio_driver *drv = container_of(dev->dev.driver,
-						 struct virtio_driver, driver);
+	struct virtio_device *dev = dev_to_virtio(_d);
+	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
 
 	drv->remove(dev);
 
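The container_of() boilerplate above is replaced by the dev_to_virtio()/drv_to_virtio() helpers introduced earlier in this series. They are presumably defined in include/linux/virtio.h along these lines (a sketch, not quoted from the header):

	static inline struct virtio_device *dev_to_virtio(struct device *_dev)
	{
		return container_of(_dev, struct virtio_device, dev);
	}

	static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
	{
		return container_of(drv, struct virtio_driver, driver);
	}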
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 2a70558b36ea..d19fe3e323b4 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -139,10 +139,9 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
 		struct page *page = balloon_page_enqueue(vb_dev_info);
 
 		if (!page) {
-			if (printk_ratelimit())
-				dev_printk(KERN_INFO, &vb->vdev->dev,
-					   "Out of puff! Can't get %u pages\n",
-					   VIRTIO_BALLOON_PAGES_PER_PAGE);
+			dev_info_ratelimited(&vb->vdev->dev,
+					     "Out of puff! Can't get %u pages\n",
+					     VIRTIO_BALLOON_PAGES_PER_PAGE);
 			/* Sleep for at least 1/5 of a second before retry. */
 			msleep(200);
 			break;
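The balloon hunk folds an open-coded printk_ratelimit() check plus dev_printk(KERN_INFO, ...) into dev_info_ratelimited(), which is the general pattern of the dev_printk(KERN_<LEVEL> conversion in this series. A hedged sketch of the before/after idiom (the device pointer and message are illustrative):

	/* Before: explicit ratelimit test plus a KERN_<LEVEL> string. */
	if (printk_ratelimit())
		dev_printk(KERN_INFO, dev, "something happened: %d\n", val);

	/* After: one helper that rate-limits and sets the level. */
	dev_info_ratelimited(dev, "something happened: %d\n", val);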
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 6b1b7e184939..634f80bcdbd7 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq)
 
 	/* We write the queue's selector into the notification register to
 	 * signal the other end */
-	writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
 }
 
 /* Notify all virtqueues on an interrupt. */
@@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
 	struct virtio_mmio_vq_info *info = vq->priv;
 	unsigned long flags, size;
-	unsigned int index = virtqueue_get_queue_index(vq);
+	unsigned int index = vq->index;
 
 	spin_lock_irqsave(&vm_dev->lock, flags);
 	list_del(&info->node);
@@ -521,25 +521,33 @@ static int vm_cmdline_set(const char *device,
 	int err;
 	struct resource resources[2] = {};
 	char *str;
-	long long int base;
+	long long int base, size;
+	unsigned int irq;
 	int processed, consumed = 0;
 	struct platform_device *pdev;
 
-	resources[0].flags = IORESOURCE_MEM;
-	resources[1].flags = IORESOURCE_IRQ;
-
-	resources[0].end = memparse(device, &str) - 1;
+	/* Consume "size" part of the command line parameter */
+	size = memparse(device, &str);
 
+	/* Get "@<base>:<irq>[:<id>]" chunks */
 	processed = sscanf(str, "@%lli:%u%n:%d%n",
-			&base, &resources[1].start, &consumed,
+			&base, &irq, &consumed,
 			&vm_cmdline_id, &consumed);
 
-	if (processed < 2 || processed > 3 || str[consumed])
+	/*
+	 * sscanf() must processes at least 2 chunks; also there
+	 * must be no extra characters after the last chunk, so
+	 * str[consumed] must be '\0'
+	 */
+	if (processed < 2 || str[consumed])
 		return -EINVAL;
 
+	resources[0].flags = IORESOURCE_MEM;
 	resources[0].start = base;
-	resources[0].end += base;
-	resources[1].end = resources[1].start;
+	resources[0].end = base + size - 1;
+
+	resources[1].flags = IORESOURCE_IRQ;
+	resources[1].start = resources[1].end = irq;
 
 	if (!vm_cmdline_parent_registered) {
 		err = device_register(&vm_cmdline_parent);
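For context, vm_cmdline_set() parses the virtio_mmio.device= module parameter. Based on the memparse()/sscanf() format above, a matching kernel command line looks roughly like the following (the concrete values are only an example, not taken from this series):

	virtio_mmio.device=0x100@0x100b0000:48
	virtio_mmio.device=1K@0x1001e000:74:3

i.e. <size>@<base>:<irq> with an optional trailing :<id>. The fix keeps the size in its own variable so resources[0].end can be computed as base + size - 1, and parses the IRQ into a plain unsigned int before copying it into resources[1].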
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index c33aea36598a..e3ecc94591ad 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq)
 
 	/* we write the queue's selector into the notification register to
 	 * signal the other end */
-	iowrite16(virtqueue_get_queue_index(vq),
-		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
 }
 
 /* Handle a configuration change: Tell driver if it wants to know. */
@@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq)
 	list_del(&info->node);
 	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
-	iowrite16(virtqueue_get_queue_index(vq),
-		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
 	if (vp_dev->msix_enabled) {
 		iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -830,16 +828,4 @@ static struct pci_driver virtio_pci_driver = {
 #endif
 };
 
-static int __init virtio_pci_init(void)
-{
-	return pci_register_driver(&virtio_pci_driver);
-}
-
-module_init(virtio_pci_init);
-
-static void __exit virtio_pci_exit(void)
-{
-	pci_unregister_driver(&virtio_pci_driver);
-}
-
-module_exit(virtio_pci_exit);
+module_pci_driver(virtio_pci_driver);
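module_pci_driver() is the generic helper that generates exactly the init/exit boilerplate deleted above; in kernels of this era it is defined in include/linux/pci.h roughly as:

	#define module_pci_driver(__pci_driver) \
		module_driver(__pci_driver, pci_register_driver, \
			       pci_unregister_driver)

module_driver() in turn emits the __init/__exit functions and the module_init()/module_exit() pair, so the driver's behaviour is unchanged.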
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index e639584b2dbd..ffd7e7da5d3b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -93,8 +93,6 @@ struct vring_virtqueue
 	/* Host publishes avail event idx */
 	bool event;
 
-	/* Number of free buffers */
-	unsigned int num_free;
 	/* Head of free buffer list. */
 	unsigned int free_head;
 	/* Number we've added since last sync. */
@@ -106,9 +104,6 @@ struct vring_virtqueue
 	/* How to notify other side. FIXME: commonalize hcalls! */
 	void (*notify)(struct virtqueue *vq);
 
-	/* Index of the queue */
-	int queue_index;
-
 #ifdef DEBUG
 	/* They're supposed to lock for us. */
 	unsigned int in_use;
@@ -135,6 +130,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	unsigned head;
 	int i;
 
+	/*
+	 * We require lowmem mappings for the descriptors because
+	 * otherwise virt_to_phys will give us bogus addresses in the
+	 * virtqueue.
+	 */
+	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
+
 	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
 	if (!desc)
 		return -ENOMEM;
@@ -160,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	desc[i-1].next = 0;
 
 	/* We're about to use a buffer */
-	vq->num_free--;
+	vq->vq.num_free--;
 
 	/* Use a single buffer which doesn't continue */
 	head = vq->free_head;
@@ -174,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	return head;
 }
 
-int virtqueue_get_queue_index(struct virtqueue *_vq)
-{
-	struct vring_virtqueue *vq = to_vvq(_vq);
-	return vq->queue_index;
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
-
 /**
  * virtqueue_add_buf - expose buffer to other end
  * @vq: the struct virtqueue we're talking about.
@@ -193,10 +188,7 @@ EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
- * Returns remaining capacity of queue or a negative error
- * (ie. ENOSPC). Note that it only really makes sense to treat all
- * positive return values as "available": indirect buffers mean that
- * we can put an entire sg[] array inside a single queue entry.
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
 int virtqueue_add_buf(struct virtqueue *_vq,
 		      struct scatterlist sg[],
@@ -228,7 +220,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
 
 	/* If the host supports indirect descriptor tables, and we have multiple
 	 * buffers, then go indirect. FIXME: tune this threshold */
-	if (vq->indirect && (out + in) > 1 && vq->num_free) {
+	if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
 		head = vring_add_indirect(vq, sg, out, in, gfp);
 		if (likely(head >= 0))
 			goto add_head;
@@ -237,9 +229,9 @@ int virtqueue_add_buf(struct virtqueue *_vq,
 	BUG_ON(out + in > vq->vring.num);
 	BUG_ON(out + in == 0);
 
-	if (vq->num_free < out + in) {
+	if (vq->vq.num_free < out + in) {
 		pr_debug("Can't add buf len %i - avail = %i\n",
-			 out + in, vq->num_free);
+			 out + in, vq->vq.num_free);
 		/* FIXME: for historical reasons, we force a notify here if
 		 * there are outgoing parts to the buffer. Presumably the
 		 * host should service the ring ASAP. */
@@ -250,7 +242,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
 	}
 
 	/* We're about to use some buffers from the free list. */
-	vq->num_free -= out + in;
+	vq->vq.num_free -= out + in;
 
 	head = vq->free_head;
 	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
@@ -296,7 +288,7 @@ add_head:
 	pr_debug("Added buffer head %i to %p\n", head, vq);
 	END_USE(vq);
 
-	return vq->num_free;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(virtqueue_add_buf);
 
@@ -393,13 +385,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 
 	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
 		i = vq->vring.desc[i].next;
-		vq->num_free++;
+		vq->vq.num_free++;
 	}
 
 	vq->vring.desc[i].next = vq->free_head;
 	vq->free_head = head;
 	/* Plus final descriptor */
-	vq->num_free++;
+	vq->vq.num_free++;
 }
 
 static inline bool more_used(const struct vring_virtqueue *vq)
@@ -599,7 +591,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 		return buf;
 	}
 	/* That should have freed everything. */
-	BUG_ON(vq->num_free != vq->vring.num);
+	BUG_ON(vq->vq.num_free != vq->vring.num);
 
 	END_USE(vq);
 	return NULL;
@@ -653,12 +645,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
 	vq->vq.name = name;
+	vq->vq.num_free = num;
+	vq->vq.index = index;
 	vq->notify = notify;
 	vq->weak_barriers = weak_barriers;
 	vq->broken = false;
 	vq->last_used_idx = 0;
 	vq->num_added = 0;
-	vq->queue_index = index;
 	list_add_tail(&vq->vq.list, &vdev->vqs);
 #ifdef DEBUG
 	vq->in_use = false;
@@ -673,7 +666,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 
 	/* Put everything in free lists. */
-	vq->num_free = num;
 	vq->free_head = 0;
 	for (i = 0; i < num-1; i++) {
 		vq->vring.desc[i].next = i+1;
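With num_free and index now public members of struct virtqueue, the virtqueue_get_queue_index() accessor and the capacity-style return value of virtqueue_add_buf() both go away. An illustrative (hypothetical) snippet of the resulting driver-side idiom, not taken from any file in this pull:

	/* Illustrative only: fields are read straight off struct virtqueue. */
	static void example_after_this_series(struct virtqueue *vq)
	{
		pr_debug("vq %u has %u free descriptors\n",
			 vq->index, vq->num_free);
	}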