Diffstat (limited to 'drivers')
| -rw-r--r-- | drivers/block/virtio_blk.c | 10 |
| -rw-r--r-- | drivers/char/hw_random/virtio-rng.c | 30 |
| -rw-r--r-- | drivers/char/virtio_console.c | 26 |
| -rw-r--r-- | drivers/lguest/lguest_device.c | 41 |
| -rw-r--r-- | drivers/net/virtio_net.c | 45 |
| -rw-r--r-- | drivers/s390/kvm/kvm_virtio.c | 43 |
| -rw-r--r-- | drivers/virtio/virtio.c | 29 |
| -rw-r--r-- | drivers/virtio/virtio_balloon.c | 27 |
| -rw-r--r-- | drivers/virtio/virtio_pci.c | 307 |
| -rw-r--r-- | drivers/virtio/virtio_ring.c | 102 |
10 files changed, 524 insertions, 136 deletions
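
The pattern repeated throughout this diff is the move from per-queue find_vq()/del_vq() configuration calls to a batched find_vqs()/del_vqs() pair taking parallel arrays of callbacks and human-readable queue names (the names end up in places like /proc/interrupts once virtio_pci gains MSI-X below). As orientation, here is a minimal, hypothetical probe routine using that calling convention; the callback and queue names are illustrative, not taken from any one driver in this series.

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void example_rx_done(struct virtqueue *vq) { /* reclaim input buffers */ }
static void example_tx_done(struct virtqueue *vq) { /* reclaim output buffers */ }

static int example_probe(struct virtio_device *vdev)
{
        struct virtqueue *vqs[2];
        vq_callback_t *callbacks[] = { example_rx_done, example_tx_done };
        const char *names[] = { "input", "output" };
        int err;

        /* One call sets up every queue (and, for virtio_pci, its vectors). */
        err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
        if (err)
                return err;     /* nothing was created, nothing to undo */

        /* ... use vqs[0] and vqs[1] ...
         * Any later error path, and remove(), unwinds with a single
         * vdev->config->del_vqs(vdev) instead of one del_vq() per queue. */
        return 0;
}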
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c0facaa55cf4..43db3ea15b54 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
| @@ -254,7 +254,7 @@ static int index_to_minor(int index) | |||
| 254 | return index << PART_BITS; | 254 | return index << PART_BITS; |
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | static int virtblk_probe(struct virtio_device *vdev) | 257 | static int __devinit virtblk_probe(struct virtio_device *vdev) |
| 258 | { | 258 | { |
| 259 | struct virtio_blk *vblk; | 259 | struct virtio_blk *vblk; |
| 260 | int err; | 260 | int err; |
| @@ -288,7 +288,7 @@ static int virtblk_probe(struct virtio_device *vdev) | |||
| 288 | sg_init_table(vblk->sg, vblk->sg_elems); | 288 | sg_init_table(vblk->sg, vblk->sg_elems); |
| 289 | 289 | ||
| 290 | /* We expect one virtqueue, for output. */ | 290 | /* We expect one virtqueue, for output. */ |
| 291 | vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); | 291 | vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests"); |
| 292 | if (IS_ERR(vblk->vq)) { | 292 | if (IS_ERR(vblk->vq)) { |
| 293 | err = PTR_ERR(vblk->vq); | 293 | err = PTR_ERR(vblk->vq); |
| 294 | goto out_free_vblk; | 294 | goto out_free_vblk; |
| @@ -388,14 +388,14 @@ out_put_disk: | |||
| 388 | out_mempool: | 388 | out_mempool: |
| 389 | mempool_destroy(vblk->pool); | 389 | mempool_destroy(vblk->pool); |
| 390 | out_free_vq: | 390 | out_free_vq: |
| 391 | vdev->config->del_vq(vblk->vq); | 391 | vdev->config->del_vqs(vdev); |
| 392 | out_free_vblk: | 392 | out_free_vblk: |
| 393 | kfree(vblk); | 393 | kfree(vblk); |
| 394 | out: | 394 | out: |
| 395 | return err; | 395 | return err; |
| 396 | } | 396 | } |
| 397 | 397 | ||
| 398 | static void virtblk_remove(struct virtio_device *vdev) | 398 | static void __devexit virtblk_remove(struct virtio_device *vdev) |
| 399 | { | 399 | { |
| 400 | struct virtio_blk *vblk = vdev->priv; | 400 | struct virtio_blk *vblk = vdev->priv; |
| 401 | 401 | ||
| @@ -409,7 +409,7 @@ static void virtblk_remove(struct virtio_device *vdev) | |||
| 409 | blk_cleanup_queue(vblk->disk->queue); | 409 | blk_cleanup_queue(vblk->disk->queue); |
| 410 | put_disk(vblk->disk); | 410 | put_disk(vblk->disk); |
| 411 | mempool_destroy(vblk->pool); | 411 | mempool_destroy(vblk->pool); |
| 412 | vdev->config->del_vq(vblk->vq); | 412 | vdev->config->del_vqs(vdev); |
| 413 | kfree(vblk); | 413 | kfree(vblk); |
| 414 | } | 414 | } |
| 415 | 415 | ||
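
Single-queue drivers such as virtio_blk above (and virtio-rng next) switch to the virtio_find_single_vq() convenience helper rather than open-coding the one-element arrays. The helper itself lives in the core headers, outside this 'drivers'-limited diff; it presumably reduces to a single-entry find_vqs() call along these lines (a sketch under that assumption, not the upstream text):

/* Sketch only; assumes <linux/err.h> and <linux/virtio_config.h>. */
static inline struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
                                                      vq_callback_t *c,
                                                      const char *n)
{
        vq_callback_t *callbacks[] = { c };
        const char *names[] = { n };
        struct virtqueue *vq;
        int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names);

        if (err < 0)
                return ERR_PTR(err);    /* callers test with IS_ERR(), as above */
        return vq;
}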
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 86e83f883139..32216b623248 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
| @@ -35,13 +35,13 @@ static DECLARE_COMPLETION(have_data); | |||
| 35 | 35 | ||
| 36 | static void random_recv_done(struct virtqueue *vq) | 36 | static void random_recv_done(struct virtqueue *vq) |
| 37 | { | 37 | { |
| 38 | int len; | 38 | unsigned int len; |
| 39 | 39 | ||
| 40 | /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ | 40 | /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ |
| 41 | if (!vq->vq_ops->get_buf(vq, &len)) | 41 | if (!vq->vq_ops->get_buf(vq, &len)) |
| 42 | return; | 42 | return; |
| 43 | 43 | ||
| 44 | data_left = len / sizeof(random_data[0]); | 44 | data_left += len; |
| 45 | complete(&have_data); | 45 | complete(&have_data); |
| 46 | } | 46 | } |
| 47 | 47 | ||
| @@ -49,7 +49,7 @@ static void register_buffer(void) | |||
| 49 | { | 49 | { |
| 50 | struct scatterlist sg; | 50 | struct scatterlist sg; |
| 51 | 51 | ||
| 52 | sg_init_one(&sg, random_data, RANDOM_DATA_SIZE); | 52 | sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left); |
| 53 | /* There should always be room for one buffer. */ | 53 | /* There should always be room for one buffer. */ |
| 54 | if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0) | 54 | if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0) |
| 55 | BUG(); | 55 | BUG(); |
| @@ -59,24 +59,32 @@ static void register_buffer(void) | |||
| 59 | /* At least we don't udelay() in a loop like some other drivers. */ | 59 | /* At least we don't udelay() in a loop like some other drivers. */ |
| 60 | static int virtio_data_present(struct hwrng *rng, int wait) | 60 | static int virtio_data_present(struct hwrng *rng, int wait) |
| 61 | { | 61 | { |
| 62 | if (data_left) | 62 | if (data_left >= sizeof(u32)) |
| 63 | return 1; | 63 | return 1; |
| 64 | 64 | ||
| 65 | again: | ||
| 65 | if (!wait) | 66 | if (!wait) |
| 66 | return 0; | 67 | return 0; |
| 67 | 68 | ||
| 68 | wait_for_completion(&have_data); | 69 | wait_for_completion(&have_data); |
| 70 | |||
| 71 | /* Not enough? Re-register. */ | ||
| 72 | if (unlikely(data_left < sizeof(u32))) { | ||
| 73 | register_buffer(); | ||
| 74 | goto again; | ||
| 75 | } | ||
| 76 | |||
| 69 | return 1; | 77 | return 1; |
| 70 | } | 78 | } |
| 71 | 79 | ||
| 72 | /* virtio_data_present() must have succeeded before this is called. */ | 80 | /* virtio_data_present() must have succeeded before this is called. */ |
| 73 | static int virtio_data_read(struct hwrng *rng, u32 *data) | 81 | static int virtio_data_read(struct hwrng *rng, u32 *data) |
| 74 | { | 82 | { |
| 75 | BUG_ON(!data_left); | 83 | BUG_ON(data_left < sizeof(u32)); |
| 76 | 84 | data_left -= sizeof(u32); | |
| 77 | *data = random_data[--data_left]; | 85 | *data = random_data[data_left / 4]; |
| 78 | 86 | ||
| 79 | if (!data_left) { | 87 | if (data_left < sizeof(u32)) { |
| 80 | init_completion(&have_data); | 88 | init_completion(&have_data); |
| 81 | register_buffer(); | 89 | register_buffer(); |
| 82 | } | 90 | } |
| @@ -94,13 +102,13 @@ static int virtrng_probe(struct virtio_device *vdev) | |||
| 94 | int err; | 102 | int err; |
| 95 | 103 | ||
| 96 | /* We expect a single virtqueue. */ | 104 | /* We expect a single virtqueue. */ |
| 97 | vq = vdev->config->find_vq(vdev, 0, random_recv_done); | 105 | vq = virtio_find_single_vq(vdev, random_recv_done, "input"); |
| 98 | if (IS_ERR(vq)) | 106 | if (IS_ERR(vq)) |
| 99 | return PTR_ERR(vq); | 107 | return PTR_ERR(vq); |
| 100 | 108 | ||
| 101 | err = hwrng_register(&virtio_hwrng); | 109 | err = hwrng_register(&virtio_hwrng); |
| 102 | if (err) { | 110 | if (err) { |
| 103 | vdev->config->del_vq(vq); | 111 | vdev->config->del_vqs(vdev); |
| 104 | return err; | 112 | return err; |
| 105 | } | 113 | } |
| 106 | 114 | ||
| @@ -112,7 +120,7 @@ static void virtrng_remove(struct virtio_device *vdev) | |||
| 112 | { | 120 | { |
| 113 | vdev->config->reset(vdev); | 121 | vdev->config->reset(vdev); |
| 114 | hwrng_unregister(&virtio_hwrng); | 122 | hwrng_unregister(&virtio_hwrng); |
| 115 | vdev->config->del_vq(vq); | 123 | vdev->config->del_vqs(vdev); |
| 116 | } | 124 | } |
| 117 | 125 | ||
| 118 | static struct virtio_device_id id_table[] = { | 126 | static struct virtio_device_id id_table[] = { |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ff6f5a4b58fb..c74dacfa6795 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
| @@ -188,6 +188,9 @@ static void hvc_handle_input(struct virtqueue *vq) | |||
| 188 | * Finally we put our input buffer in the input queue, ready to receive. */ | 188 | * Finally we put our input buffer in the input queue, ready to receive. */ |
| 189 | static int __devinit virtcons_probe(struct virtio_device *dev) | 189 | static int __devinit virtcons_probe(struct virtio_device *dev) |
| 190 | { | 190 | { |
| 191 | vq_callback_t *callbacks[] = { hvc_handle_input, NULL}; | ||
| 192 | const char *names[] = { "input", "output" }; | ||
| 193 | struct virtqueue *vqs[2]; | ||
| 191 | int err; | 194 | int err; |
| 192 | 195 | ||
| 193 | vdev = dev; | 196 | vdev = dev; |
| @@ -199,20 +202,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev) | |||
| 199 | goto fail; | 202 | goto fail; |
| 200 | } | 203 | } |
| 201 | 204 | ||
| 202 | /* Find the input queue. */ | 205 | /* Find the queues. */ |
| 203 | /* FIXME: This is why we want to wean off hvc: we do nothing | 206 | /* FIXME: This is why we want to wean off hvc: we do nothing |
| 204 | * when input comes in. */ | 207 | * when input comes in. */ |
| 205 | in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input); | 208 | err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); |
| 206 | if (IS_ERR(in_vq)) { | 209 | if (err) |
| 207 | err = PTR_ERR(in_vq); | ||
| 208 | goto free; | 210 | goto free; |
| 209 | } | ||
| 210 | 211 | ||
| 211 | out_vq = vdev->config->find_vq(vdev, 1, NULL); | 212 | in_vq = vqs[0]; |
| 212 | if (IS_ERR(out_vq)) { | 213 | out_vq = vqs[1]; |
| 213 | err = PTR_ERR(out_vq); | ||
| 214 | goto free_in_vq; | ||
| 215 | } | ||
| 216 | 214 | ||
| 217 | /* Start using the new console output. */ | 215 | /* Start using the new console output. */ |
| 218 | virtio_cons.get_chars = get_chars; | 216 | virtio_cons.get_chars = get_chars; |
| @@ -233,17 +231,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev) | |||
| 233 | hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); | 231 | hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); |
| 234 | if (IS_ERR(hvc)) { | 232 | if (IS_ERR(hvc)) { |
| 235 | err = PTR_ERR(hvc); | 233 | err = PTR_ERR(hvc); |
| 236 | goto free_out_vq; | 234 | goto free_vqs; |
| 237 | } | 235 | } |
| 238 | 236 | ||
| 239 | /* Register the input buffer the first time. */ | 237 | /* Register the input buffer the first time. */ |
| 240 | add_inbuf(); | 238 | add_inbuf(); |
| 241 | return 0; | 239 | return 0; |
| 242 | 240 | ||
| 243 | free_out_vq: | 241 | free_vqs: |
| 244 | vdev->config->del_vq(out_vq); | 242 | vdev->config->del_vqs(vdev); |
| 245 | free_in_vq: | ||
| 246 | vdev->config->del_vq(in_vq); | ||
| 247 | free: | 243 | free: |
| 248 | kfree(inbuf); | 244 | kfree(inbuf); |
| 249 | fail: | 245 | fail: |
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index df44d962626d..e082cdac88b4 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
| @@ -228,7 +228,8 @@ extern void lguest_setup_irq(unsigned int irq); | |||
| 228 | * function. */ | 228 | * function. */ |
| 229 | static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | 229 | static struct virtqueue *lg_find_vq(struct virtio_device *vdev, |
| 230 | unsigned index, | 230 | unsigned index, |
| 231 | void (*callback)(struct virtqueue *vq)) | 231 | void (*callback)(struct virtqueue *vq), |
| 232 | const char *name) | ||
| 232 | { | 233 | { |
| 233 | struct lguest_device *ldev = to_lgdev(vdev); | 234 | struct lguest_device *ldev = to_lgdev(vdev); |
| 234 | struct lguest_vq_info *lvq; | 235 | struct lguest_vq_info *lvq; |
| @@ -263,7 +264,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
| 263 | /* OK, tell virtio_ring.c to set up a virtqueue now we know its size | 264 | /* OK, tell virtio_ring.c to set up a virtqueue now we know its size |
| 264 | * and we've got a pointer to its pages. */ | 265 | * and we've got a pointer to its pages. */ |
| 265 | vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, | 266 | vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, |
| 266 | vdev, lvq->pages, lg_notify, callback); | 267 | vdev, lvq->pages, lg_notify, callback, name); |
| 267 | if (!vq) { | 268 | if (!vq) { |
| 268 | err = -ENOMEM; | 269 | err = -ENOMEM; |
| 269 | goto unmap; | 270 | goto unmap; |
| @@ -312,6 +313,38 @@ static void lg_del_vq(struct virtqueue *vq) | |||
| 312 | kfree(lvq); | 313 | kfree(lvq); |
| 313 | } | 314 | } |
| 314 | 315 | ||
| 316 | static void lg_del_vqs(struct virtio_device *vdev) | ||
| 317 | { | ||
| 318 | struct virtqueue *vq, *n; | ||
| 319 | |||
| 320 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) | ||
| 321 | lg_del_vq(vq); | ||
| 322 | } | ||
| 323 | |||
| 324 | static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs, | ||
| 325 | struct virtqueue *vqs[], | ||
| 326 | vq_callback_t *callbacks[], | ||
| 327 | const char *names[]) | ||
| 328 | { | ||
| 329 | struct lguest_device *ldev = to_lgdev(vdev); | ||
| 330 | int i; | ||
| 331 | |||
| 332 | /* We must have this many virtqueues. */ | ||
| 333 | if (nvqs > ldev->desc->num_vq) | ||
| 334 | return -ENOENT; | ||
| 335 | |||
| 336 | for (i = 0; i < nvqs; ++i) { | ||
| 337 | vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]); | ||
| 338 | if (IS_ERR(vqs[i])) | ||
| 339 | goto error; | ||
| 340 | } | ||
| 341 | return 0; | ||
| 342 | |||
| 343 | error: | ||
| 344 | lg_del_vqs(vdev); | ||
| 345 | return PTR_ERR(vqs[i]); | ||
| 346 | } | ||
| 347 | |||
| 315 | /* The ops structure which hooks everything together. */ | 348 | /* The ops structure which hooks everything together. */ |
| 316 | static struct virtio_config_ops lguest_config_ops = { | 349 | static struct virtio_config_ops lguest_config_ops = { |
| 317 | .get_features = lg_get_features, | 350 | .get_features = lg_get_features, |
| @@ -321,8 +354,8 @@ static struct virtio_config_ops lguest_config_ops = { | |||
| 321 | .get_status = lg_get_status, | 354 | .get_status = lg_get_status, |
| 322 | .set_status = lg_set_status, | 355 | .set_status = lg_set_status, |
| 323 | .reset = lg_reset, | 356 | .reset = lg_reset, |
| 324 | .find_vq = lg_find_vq, | 357 | .find_vqs = lg_find_vqs, |
| 325 | .del_vq = lg_del_vq, | 358 | .del_vqs = lg_del_vqs, |
| 326 | }; | 359 | }; |
| 327 | 360 | ||
| 328 | /* The root device for the lguest virtio devices. This makes them appear as | 361 | /* The root device for the lguest virtio devices. This makes them appear as |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4d1d47953fc6..7fa620ddeb21 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
| @@ -845,6 +845,10 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 845 | int err; | 845 | int err; |
| 846 | struct net_device *dev; | 846 | struct net_device *dev; |
| 847 | struct virtnet_info *vi; | 847 | struct virtnet_info *vi; |
| 848 | struct virtqueue *vqs[3]; | ||
| 849 | vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL}; | ||
| 850 | const char *names[] = { "input", "output", "control" }; | ||
| 851 | int nvqs; | ||
| 848 | 852 | ||
| 849 | /* Allocate ourselves a network device with room for our info */ | 853 | /* Allocate ourselves a network device with room for our info */ |
| 850 | dev = alloc_etherdev(sizeof(struct virtnet_info)); | 854 | dev = alloc_etherdev(sizeof(struct virtnet_info)); |
| @@ -905,25 +909,19 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 905 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) | 909 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
| 906 | vi->mergeable_rx_bufs = true; | 910 | vi->mergeable_rx_bufs = true; |
| 907 | 911 | ||
| 908 | /* We expect two virtqueues, receive then send. */ | 912 | /* We expect two virtqueues, receive then send, |
| 909 | vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); | 913 | * and optionally control. */ |
| 910 | if (IS_ERR(vi->rvq)) { | 914 | nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2; |
| 911 | err = PTR_ERR(vi->rvq); | 915 | |
| 916 | err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); | ||
| 917 | if (err) | ||
| 912 | goto free; | 918 | goto free; |
| 913 | } | ||
| 914 | 919 | ||
| 915 | vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done); | 920 | vi->rvq = vqs[0]; |
| 916 | if (IS_ERR(vi->svq)) { | 921 | vi->svq = vqs[1]; |
| 917 | err = PTR_ERR(vi->svq); | ||
| 918 | goto free_recv; | ||
| 919 | } | ||
| 920 | 922 | ||
| 921 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { | 923 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { |
| 922 | vi->cvq = vdev->config->find_vq(vdev, 2, NULL); | 924 | vi->cvq = vqs[2]; |
| 923 | if (IS_ERR(vi->cvq)) { | ||
| 924 | err = PTR_ERR(vi->svq); | ||
| 925 | goto free_send; | ||
| 926 | } | ||
| 927 | 925 | ||
| 928 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) | 926 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
| 929 | dev->features |= NETIF_F_HW_VLAN_FILTER; | 927 | dev->features |= NETIF_F_HW_VLAN_FILTER; |
| @@ -941,7 +939,7 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 941 | err = register_netdev(dev); | 939 | err = register_netdev(dev); |
| 942 | if (err) { | 940 | if (err) { |
| 943 | pr_debug("virtio_net: registering device failed\n"); | 941 | pr_debug("virtio_net: registering device failed\n"); |
| 944 | goto free_ctrl; | 942 | goto free_vqs; |
| 945 | } | 943 | } |
| 946 | 944 | ||
| 947 | /* Last of all, set up some receive buffers. */ | 945 | /* Last of all, set up some receive buffers. */ |
| @@ -962,13 +960,8 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 962 | 960 | ||
| 963 | unregister: | 961 | unregister: |
| 964 | unregister_netdev(dev); | 962 | unregister_netdev(dev); |
| 965 | free_ctrl: | 963 | free_vqs: |
| 966 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) | 964 | vdev->config->del_vqs(vdev); |
| 967 | vdev->config->del_vq(vi->cvq); | ||
| 968 | free_send: | ||
| 969 | vdev->config->del_vq(vi->svq); | ||
| 970 | free_recv: | ||
| 971 | vdev->config->del_vq(vi->rvq); | ||
| 972 | free: | 965 | free: |
| 973 | free_netdev(dev); | 966 | free_netdev(dev); |
| 974 | return err; | 967 | return err; |
| @@ -994,12 +987,10 @@ static void virtnet_remove(struct virtio_device *vdev) | |||
| 994 | 987 | ||
| 995 | BUG_ON(vi->num != 0); | 988 | BUG_ON(vi->num != 0); |
| 996 | 989 | ||
| 997 | vdev->config->del_vq(vi->svq); | ||
| 998 | vdev->config->del_vq(vi->rvq); | ||
| 999 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) | ||
| 1000 | vdev->config->del_vq(vi->cvq); | ||
| 1001 | unregister_netdev(vi->dev); | 990 | unregister_netdev(vi->dev); |
| 1002 | 991 | ||
| 992 | vdev->config->del_vqs(vi->vdev); | ||
| 993 | |||
| 1003 | while (vi->pages) | 994 | while (vi->pages) |
| 1004 | __free_pages(get_a_page(vi, GFP_KERNEL), 0); | 995 | __free_pages(get_a_page(vi, GFP_KERNEL), 0); |
| 1005 | 996 | ||
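
A NULL entry in callbacks[] (the control queue here, and the console's output queue earlier) asks for a queue with no interrupt handler; vring_new_virtqueue() then sets VRING_AVAIL_F_NO_INTERRUPT, as the unchanged lines in the virtio_ring.c hunks at the end of this diff show. Such a queue is driven synchronously by kicking and polling get_buf(), roughly as in this hedged sketch (not code from this patch; error handling omitted):

/* Poll a callback-less virtqueue until the host has consumed the buffer. */
static void example_poll_vq(struct virtqueue *vq)
{
        unsigned int len;

        vq->vq_ops->kick(vq);
        while (!vq->vq_ops->get_buf(vq, &len))
                cpu_relax();
}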
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index cbc8566fab70..e38e5d306faf 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
| @@ -173,8 +173,9 @@ static void kvm_notify(struct virtqueue *vq) | |||
| 173 | * this device and sets it up. | 173 | * this device and sets it up. |
| 174 | */ | 174 | */ |
| 175 | static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, | 175 | static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, |
| 176 | unsigned index, | 176 | unsigned index, |
| 177 | void (*callback)(struct virtqueue *vq)) | 177 | void (*callback)(struct virtqueue *vq), |
| 178 | const char *name) | ||
| 178 | { | 179 | { |
| 179 | struct kvm_device *kdev = to_kvmdev(vdev); | 180 | struct kvm_device *kdev = to_kvmdev(vdev); |
| 180 | struct kvm_vqconfig *config; | 181 | struct kvm_vqconfig *config; |
| @@ -194,7 +195,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, | |||
| 194 | 195 | ||
| 195 | vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, | 196 | vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, |
| 196 | vdev, (void *) config->address, | 197 | vdev, (void *) config->address, |
| 197 | kvm_notify, callback); | 198 | kvm_notify, callback, name); |
| 198 | if (!vq) { | 199 | if (!vq) { |
| 199 | err = -ENOMEM; | 200 | err = -ENOMEM; |
| 200 | goto unmap; | 201 | goto unmap; |
| @@ -226,6 +227,38 @@ static void kvm_del_vq(struct virtqueue *vq) | |||
| 226 | KVM_S390_VIRTIO_RING_ALIGN)); | 227 | KVM_S390_VIRTIO_RING_ALIGN)); |
| 227 | } | 228 | } |
| 228 | 229 | ||
| 230 | static void kvm_del_vqs(struct virtio_device *vdev) | ||
| 231 | { | ||
| 232 | struct virtqueue *vq, *n; | ||
| 233 | |||
| 234 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) | ||
| 235 | kvm_del_vq(vq); | ||
| 236 | } | ||
| 237 | |||
| 238 | static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, | ||
| 239 | struct virtqueue *vqs[], | ||
| 240 | vq_callback_t *callbacks[], | ||
| 241 | const char *names[]) | ||
| 242 | { | ||
| 243 | struct kvm_device *kdev = to_kvmdev(vdev); | ||
| 244 | int i; | ||
| 245 | |||
| 246 | /* We must have this many virtqueues. */ | ||
| 247 | if (nvqs > kdev->desc->num_vq) | ||
| 248 | return -ENOENT; | ||
| 249 | |||
| 250 | for (i = 0; i < nvqs; ++i) { | ||
| 251 | vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); | ||
| 252 | if (IS_ERR(vqs[i])) | ||
| 253 | goto error; | ||
| 254 | } | ||
| 255 | return 0; | ||
| 256 | |||
| 257 | error: | ||
| 258 | kvm_del_vqs(vdev); | ||
| 259 | return PTR_ERR(vqs[i]); | ||
| 260 | } | ||
| 261 | |||
| 229 | /* | 262 | /* |
| 230 | * The config ops structure as defined by virtio config | 263 | * The config ops structure as defined by virtio config |
| 231 | */ | 264 | */ |
| @@ -237,8 +270,8 @@ static struct virtio_config_ops kvm_vq_configspace_ops = { | |||
| 237 | .get_status = kvm_get_status, | 270 | .get_status = kvm_get_status, |
| 238 | .set_status = kvm_set_status, | 271 | .set_status = kvm_set_status, |
| 239 | .reset = kvm_reset, | 272 | .reset = kvm_reset, |
| 240 | .find_vq = kvm_find_vq, | 273 | .find_vqs = kvm_find_vqs, |
| 241 | .del_vq = kvm_del_vq, | 274 | .del_vqs = kvm_del_vqs, |
| 242 | }; | 275 | }; |
| 243 | 276 | ||
| 244 | /* | 277 | /* |
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 018c070a357f..3a43ebf83a49 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
| @@ -31,21 +31,37 @@ static ssize_t modalias_show(struct device *_d, | |||
| 31 | return sprintf(buf, "virtio:d%08Xv%08X\n", | 31 | return sprintf(buf, "virtio:d%08Xv%08X\n", |
| 32 | dev->id.device, dev->id.vendor); | 32 | dev->id.device, dev->id.vendor); |
| 33 | } | 33 | } |
| 34 | static ssize_t features_show(struct device *_d, | ||
| 35 | struct device_attribute *attr, char *buf) | ||
| 36 | { | ||
| 37 | struct virtio_device *dev = container_of(_d, struct virtio_device, dev); | ||
| 38 | unsigned int i; | ||
| 39 | ssize_t len = 0; | ||
| 40 | |||
| 41 | /* We actually represent this as a bitstring, as it could be | ||
| 42 | * arbitrary length in future. */ | ||
| 43 | for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) | ||
| 44 | len += sprintf(buf+len, "%c", | ||
| 45 | test_bit(i, dev->features) ? '1' : '0'); | ||
| 46 | len += sprintf(buf+len, "\n"); | ||
| 47 | return len; | ||
| 48 | } | ||
| 34 | static struct device_attribute virtio_dev_attrs[] = { | 49 | static struct device_attribute virtio_dev_attrs[] = { |
| 35 | __ATTR_RO(device), | 50 | __ATTR_RO(device), |
| 36 | __ATTR_RO(vendor), | 51 | __ATTR_RO(vendor), |
| 37 | __ATTR_RO(status), | 52 | __ATTR_RO(status), |
| 38 | __ATTR_RO(modalias), | 53 | __ATTR_RO(modalias), |
| 54 | __ATTR_RO(features), | ||
| 39 | __ATTR_NULL | 55 | __ATTR_NULL |
| 40 | }; | 56 | }; |
| 41 | 57 | ||
| 42 | static inline int virtio_id_match(const struct virtio_device *dev, | 58 | static inline int virtio_id_match(const struct virtio_device *dev, |
| 43 | const struct virtio_device_id *id) | 59 | const struct virtio_device_id *id) |
| 44 | { | 60 | { |
| 45 | if (id->device != dev->id.device) | 61 | if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID) |
| 46 | return 0; | 62 | return 0; |
| 47 | 63 | ||
| 48 | return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor != dev->id.vendor; | 64 | return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor; |
| 49 | } | 65 | } |
| 50 | 66 | ||
| 51 | /* This looks through all the IDs a driver claims to support. If any of them | 67 | /* This looks through all the IDs a driver claims to support. If any of them |
| @@ -118,13 +134,14 @@ static int virtio_dev_probe(struct device *_d) | |||
| 118 | if (device_features & (1 << i)) | 134 | if (device_features & (1 << i)) |
| 119 | set_bit(i, dev->features); | 135 | set_bit(i, dev->features); |
| 120 | 136 | ||
| 137 | dev->config->finalize_features(dev); | ||
| 138 | |||
| 121 | err = drv->probe(dev); | 139 | err = drv->probe(dev); |
| 122 | if (err) | 140 | if (err) |
| 123 | add_status(dev, VIRTIO_CONFIG_S_FAILED); | 141 | add_status(dev, VIRTIO_CONFIG_S_FAILED); |
| 124 | else { | 142 | else |
| 125 | dev->config->finalize_features(dev); | ||
| 126 | add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); | 143 | add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); |
| 127 | } | 144 | |
| 128 | return err; | 145 | return err; |
| 129 | } | 146 | } |
| 130 | 147 | ||
| @@ -185,6 +202,8 @@ int register_virtio_device(struct virtio_device *dev) | |||
| 185 | /* Acknowledge that we've seen the device. */ | 202 | /* Acknowledge that we've seen the device. */ |
| 186 | add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); | 203 | add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); |
| 187 | 204 | ||
| 205 | INIT_LIST_HEAD(&dev->vqs); | ||
| 206 | |||
| 188 | /* device_register() causes the bus infrastructure to look for a | 207 | /* device_register() causes the bus infrastructure to look for a |
| 189 | * matching driver. */ | 208 | * matching driver. */ |
| 190 | err = device_register(&dev->dev); | 209 | err = device_register(&dev->dev); |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9c76a061a04d..26b278264796 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
| @@ -204,6 +204,9 @@ static int balloon(void *_vballoon) | |||
| 204 | static int virtballoon_probe(struct virtio_device *vdev) | 204 | static int virtballoon_probe(struct virtio_device *vdev) |
| 205 | { | 205 | { |
| 206 | struct virtio_balloon *vb; | 206 | struct virtio_balloon *vb; |
| 207 | struct virtqueue *vqs[2]; | ||
| 208 | vq_callback_t *callbacks[] = { balloon_ack, balloon_ack }; | ||
| 209 | const char *names[] = { "inflate", "deflate" }; | ||
| 207 | int err; | 210 | int err; |
| 208 | 211 | ||
| 209 | vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); | 212 | vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); |
| @@ -218,22 +221,17 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
| 218 | vb->vdev = vdev; | 221 | vb->vdev = vdev; |
| 219 | 222 | ||
| 220 | /* We expect two virtqueues. */ | 223 | /* We expect two virtqueues. */ |
| 221 | vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack); | 224 | err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); |
| 222 | if (IS_ERR(vb->inflate_vq)) { | 225 | if (err) |
| 223 | err = PTR_ERR(vb->inflate_vq); | ||
| 224 | goto out_free_vb; | 226 | goto out_free_vb; |
| 225 | } | ||
| 226 | 227 | ||
| 227 | vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack); | 228 | vb->inflate_vq = vqs[0]; |
| 228 | if (IS_ERR(vb->deflate_vq)) { | 229 | vb->deflate_vq = vqs[1]; |
| 229 | err = PTR_ERR(vb->deflate_vq); | ||
| 230 | goto out_del_inflate_vq; | ||
| 231 | } | ||
| 232 | 230 | ||
| 233 | vb->thread = kthread_run(balloon, vb, "vballoon"); | 231 | vb->thread = kthread_run(balloon, vb, "vballoon"); |
| 234 | if (IS_ERR(vb->thread)) { | 232 | if (IS_ERR(vb->thread)) { |
| 235 | err = PTR_ERR(vb->thread); | 233 | err = PTR_ERR(vb->thread); |
| 236 | goto out_del_deflate_vq; | 234 | goto out_del_vqs; |
| 237 | } | 235 | } |
| 238 | 236 | ||
| 239 | vb->tell_host_first | 237 | vb->tell_host_first |
| @@ -241,10 +239,8 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
| 241 | 239 | ||
| 242 | return 0; | 240 | return 0; |
| 243 | 241 | ||
| 244 | out_del_deflate_vq: | 242 | out_del_vqs: |
| 245 | vdev->config->del_vq(vb->deflate_vq); | 243 | vdev->config->del_vqs(vdev); |
| 246 | out_del_inflate_vq: | ||
| 247 | vdev->config->del_vq(vb->inflate_vq); | ||
| 248 | out_free_vb: | 244 | out_free_vb: |
| 249 | kfree(vb); | 245 | kfree(vb); |
| 250 | out: | 246 | out: |
| @@ -264,8 +260,7 @@ static void virtballoon_remove(struct virtio_device *vdev) | |||
| 264 | /* Now we reset the device so we can clean up the queues. */ | 260 | /* Now we reset the device so we can clean up the queues. */ |
| 265 | vdev->config->reset(vdev); | 261 | vdev->config->reset(vdev); |
| 266 | 262 | ||
| 267 | vdev->config->del_vq(vb->deflate_vq); | 263 | vdev->config->del_vqs(vdev); |
| 268 | vdev->config->del_vq(vb->inflate_vq); | ||
| 269 | kfree(vb); | 264 | kfree(vb); |
| 270 | } | 265 | } |
| 271 | 266 | ||
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 330aacbdec1f..193c8f0e5cc5 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
| @@ -42,6 +42,26 @@ struct virtio_pci_device | |||
| 42 | /* a list of queues so we can dispatch IRQs */ | 42 | /* a list of queues so we can dispatch IRQs */ |
| 43 | spinlock_t lock; | 43 | spinlock_t lock; |
| 44 | struct list_head virtqueues; | 44 | struct list_head virtqueues; |
| 45 | |||
| 46 | /* MSI-X support */ | ||
| 47 | int msix_enabled; | ||
| 48 | int intx_enabled; | ||
| 49 | struct msix_entry *msix_entries; | ||
| 50 | /* Name strings for interrupts. This size should be enough, | ||
| 51 | * and I'm too lazy to allocate each name separately. */ | ||
| 52 | char (*msix_names)[256]; | ||
| 53 | /* Number of available vectors */ | ||
| 54 | unsigned msix_vectors; | ||
| 55 | /* Vectors allocated */ | ||
| 56 | unsigned msix_used_vectors; | ||
| 57 | }; | ||
| 58 | |||
| 59 | /* Constants for MSI-X */ | ||
| 60 | /* Use first vector for configuration changes, second and the rest for | ||
| 61 | * virtqueues Thus, we need at least 2 vectors for MSI. */ | ||
| 62 | enum { | ||
| 63 | VP_MSIX_CONFIG_VECTOR = 0, | ||
| 64 | VP_MSIX_VQ_VECTOR = 1, | ||
| 45 | }; | 65 | }; |
| 46 | 66 | ||
| 47 | struct virtio_pci_vq_info | 67 | struct virtio_pci_vq_info |
| @@ -60,6 +80,9 @@ struct virtio_pci_vq_info | |||
| 60 | 80 | ||
| 61 | /* the list node for the virtqueues list */ | 81 | /* the list node for the virtqueues list */ |
| 62 | struct list_head node; | 82 | struct list_head node; |
| 83 | |||
| 84 | /* MSI-X vector (or none) */ | ||
| 85 | unsigned vector; | ||
| 63 | }; | 86 | }; |
| 64 | 87 | ||
| 65 | /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ | 88 | /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ |
| @@ -109,7 +132,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset, | |||
| 109 | void *buf, unsigned len) | 132 | void *buf, unsigned len) |
| 110 | { | 133 | { |
| 111 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 134 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 112 | void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset; | 135 | void __iomem *ioaddr = vp_dev->ioaddr + |
| 136 | VIRTIO_PCI_CONFIG(vp_dev) + offset; | ||
| 113 | u8 *ptr = buf; | 137 | u8 *ptr = buf; |
| 114 | int i; | 138 | int i; |
| 115 | 139 | ||
| @@ -123,7 +147,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset, | |||
| 123 | const void *buf, unsigned len) | 147 | const void *buf, unsigned len) |
| 124 | { | 148 | { |
| 125 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 149 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 126 | void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset; | 150 | void __iomem *ioaddr = vp_dev->ioaddr + |
| 151 | VIRTIO_PCI_CONFIG(vp_dev) + offset; | ||
| 127 | const u8 *ptr = buf; | 152 | const u8 *ptr = buf; |
| 128 | int i; | 153 | int i; |
| 129 | 154 | ||
| @@ -164,6 +189,37 @@ static void vp_notify(struct virtqueue *vq) | |||
| 164 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); | 189 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); |
| 165 | } | 190 | } |
| 166 | 191 | ||
| 192 | /* Handle a configuration change: Tell driver if it wants to know. */ | ||
| 193 | static irqreturn_t vp_config_changed(int irq, void *opaque) | ||
| 194 | { | ||
| 195 | struct virtio_pci_device *vp_dev = opaque; | ||
| 196 | struct virtio_driver *drv; | ||
| 197 | drv = container_of(vp_dev->vdev.dev.driver, | ||
| 198 | struct virtio_driver, driver); | ||
| 199 | |||
| 200 | if (drv && drv->config_changed) | ||
| 201 | drv->config_changed(&vp_dev->vdev); | ||
| 202 | return IRQ_HANDLED; | ||
| 203 | } | ||
| 204 | |||
| 205 | /* Notify all virtqueues on an interrupt. */ | ||
| 206 | static irqreturn_t vp_vring_interrupt(int irq, void *opaque) | ||
| 207 | { | ||
| 208 | struct virtio_pci_device *vp_dev = opaque; | ||
| 209 | struct virtio_pci_vq_info *info; | ||
| 210 | irqreturn_t ret = IRQ_NONE; | ||
| 211 | unsigned long flags; | ||
| 212 | |||
| 213 | spin_lock_irqsave(&vp_dev->lock, flags); | ||
| 214 | list_for_each_entry(info, &vp_dev->virtqueues, node) { | ||
| 215 | if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) | ||
| 216 | ret = IRQ_HANDLED; | ||
| 217 | } | ||
| 218 | spin_unlock_irqrestore(&vp_dev->lock, flags); | ||
| 219 | |||
| 220 | return ret; | ||
| 221 | } | ||
| 222 | |||
| 167 | /* A small wrapper to also acknowledge the interrupt when it's handled. | 223 | /* A small wrapper to also acknowledge the interrupt when it's handled. |
| 168 | * I really need an EIO hook for the vring so I can ack the interrupt once we | 224 | * I really need an EIO hook for the vring so I can ack the interrupt once we |
| 169 | * know that we'll be handling the IRQ but before we invoke the callback since | 225 | * know that we'll be handling the IRQ but before we invoke the callback since |
| @@ -173,9 +229,6 @@ static void vp_notify(struct virtqueue *vq) | |||
| 173 | static irqreturn_t vp_interrupt(int irq, void *opaque) | 229 | static irqreturn_t vp_interrupt(int irq, void *opaque) |
| 174 | { | 230 | { |
| 175 | struct virtio_pci_device *vp_dev = opaque; | 231 | struct virtio_pci_device *vp_dev = opaque; |
| 176 | struct virtio_pci_vq_info *info; | ||
| 177 | irqreturn_t ret = IRQ_NONE; | ||
| 178 | unsigned long flags; | ||
| 179 | u8 isr; | 232 | u8 isr; |
| 180 | 233 | ||
| 181 | /* reading the ISR has the effect of also clearing it so it's very | 234 | /* reading the ISR has the effect of also clearing it so it's very |
| @@ -187,34 +240,137 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) | |||
| 187 | return IRQ_NONE; | 240 | return IRQ_NONE; |
| 188 | 241 | ||
| 189 | /* Configuration change? Tell driver if it wants to know. */ | 242 | /* Configuration change? Tell driver if it wants to know. */ |
| 190 | if (isr & VIRTIO_PCI_ISR_CONFIG) { | 243 | if (isr & VIRTIO_PCI_ISR_CONFIG) |
| 191 | struct virtio_driver *drv; | 244 | vp_config_changed(irq, opaque); |
| 192 | drv = container_of(vp_dev->vdev.dev.driver, | ||
| 193 | struct virtio_driver, driver); | ||
| 194 | 245 | ||
| 195 | if (drv && drv->config_changed) | 246 | return vp_vring_interrupt(irq, opaque); |
| 196 | drv->config_changed(&vp_dev->vdev); | 247 | } |
| 248 | |||
| 249 | static void vp_free_vectors(struct virtio_device *vdev) | ||
| 250 | { | ||
| 251 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
| 252 | int i; | ||
| 253 | |||
| 254 | if (vp_dev->intx_enabled) { | ||
| 255 | free_irq(vp_dev->pci_dev->irq, vp_dev); | ||
| 256 | vp_dev->intx_enabled = 0; | ||
| 197 | } | 257 | } |
| 198 | 258 | ||
| 199 | spin_lock_irqsave(&vp_dev->lock, flags); | 259 | for (i = 0; i < vp_dev->msix_used_vectors; ++i) |
| 200 | list_for_each_entry(info, &vp_dev->virtqueues, node) { | 260 | free_irq(vp_dev->msix_entries[i].vector, vp_dev); |
| 201 | if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) | 261 | vp_dev->msix_used_vectors = 0; |
| 202 | ret = IRQ_HANDLED; | 262 | |
| 263 | if (vp_dev->msix_enabled) { | ||
| 264 | /* Disable the vector used for configuration */ | ||
| 265 | iowrite16(VIRTIO_MSI_NO_VECTOR, | ||
| 266 | vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
| 267 | /* Flush the write out to device */ | ||
| 268 | ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
| 269 | |||
| 270 | vp_dev->msix_enabled = 0; | ||
| 271 | pci_disable_msix(vp_dev->pci_dev); | ||
| 203 | } | 272 | } |
| 204 | spin_unlock_irqrestore(&vp_dev->lock, flags); | 273 | } |
| 205 | 274 | ||
| 206 | return ret; | 275 | static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries, |
| 276 | int *options, int noptions) | ||
| 277 | { | ||
| 278 | int i; | ||
| 279 | for (i = 0; i < noptions; ++i) | ||
| 280 | if (!pci_enable_msix(dev, entries, options[i])) | ||
| 281 | return options[i]; | ||
| 282 | return -EBUSY; | ||
| 283 | } | ||
| 284 | |||
| 285 | static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs) | ||
| 286 | { | ||
| 287 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
| 288 | const char *name = dev_name(&vp_dev->vdev.dev); | ||
| 289 | unsigned i, v; | ||
| 290 | int err = -ENOMEM; | ||
| 291 | /* We want at most one vector per queue and one for config changes. | ||
| 292 | * Fallback to separate vectors for config and a shared for queues. | ||
| 293 | * Finally fall back to regular interrupts. */ | ||
| 294 | int options[] = { max_vqs + 1, 2 }; | ||
| 295 | int nvectors = max(options[0], options[1]); | ||
| 296 | |||
| 297 | vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, | ||
| 298 | GFP_KERNEL); | ||
| 299 | if (!vp_dev->msix_entries) | ||
| 300 | goto error_entries; | ||
| 301 | vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, | ||
| 302 | GFP_KERNEL); | ||
| 303 | if (!vp_dev->msix_names) | ||
| 304 | goto error_names; | ||
| 305 | |||
| 306 | for (i = 0; i < nvectors; ++i) | ||
| 307 | vp_dev->msix_entries[i].entry = i; | ||
| 308 | |||
| 309 | err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, | ||
| 310 | options, ARRAY_SIZE(options)); | ||
| 311 | if (err < 0) { | ||
| 312 | /* Can't allocate enough MSI-X vectors, use regular interrupt */ | ||
| 313 | vp_dev->msix_vectors = 0; | ||
| 314 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, | ||
| 315 | IRQF_SHARED, name, vp_dev); | ||
| 316 | if (err) | ||
| 317 | goto error_irq; | ||
| 318 | vp_dev->intx_enabled = 1; | ||
| 319 | } else { | ||
| 320 | vp_dev->msix_vectors = err; | ||
| 321 | vp_dev->msix_enabled = 1; | ||
| 322 | |||
| 323 | /* Set the vector used for configuration */ | ||
| 324 | v = vp_dev->msix_used_vectors; | ||
| 325 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, | ||
| 326 | "%s-config", name); | ||
| 327 | err = request_irq(vp_dev->msix_entries[v].vector, | ||
| 328 | vp_config_changed, 0, vp_dev->msix_names[v], | ||
| 329 | vp_dev); | ||
| 330 | if (err) | ||
| 331 | goto error_irq; | ||
| 332 | ++vp_dev->msix_used_vectors; | ||
| 333 | |||
| 334 | iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
| 335 | /* Verify we had enough resources to assign the vector */ | ||
| 336 | v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
| 337 | if (v == VIRTIO_MSI_NO_VECTOR) { | ||
| 338 | err = -EBUSY; | ||
| 339 | goto error_irq; | ||
| 340 | } | ||
| 341 | } | ||
| 342 | |||
| 343 | if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) { | ||
| 344 | /* Shared vector for all VQs */ | ||
| 345 | v = vp_dev->msix_used_vectors; | ||
| 346 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, | ||
| 347 | "%s-virtqueues", name); | ||
| 348 | err = request_irq(vp_dev->msix_entries[v].vector, | ||
| 349 | vp_vring_interrupt, 0, vp_dev->msix_names[v], | ||
| 350 | vp_dev); | ||
| 351 | if (err) | ||
| 352 | goto error_irq; | ||
| 353 | ++vp_dev->msix_used_vectors; | ||
| 354 | } | ||
| 355 | return 0; | ||
| 356 | error_irq: | ||
| 357 | vp_free_vectors(vdev); | ||
| 358 | kfree(vp_dev->msix_names); | ||
| 359 | error_names: | ||
| 360 | kfree(vp_dev->msix_entries); | ||
| 361 | error_entries: | ||
| 362 | return err; | ||
| 207 | } | 363 | } |
| 208 | 364 | ||
| 209 | /* the config->find_vq() implementation */ | ||
| 210 | static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | 365 | static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, |
| 211 | void (*callback)(struct virtqueue *vq)) | 366 | void (*callback)(struct virtqueue *vq), |
| 367 | const char *name) | ||
| 212 | { | 368 | { |
| 213 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 369 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 214 | struct virtio_pci_vq_info *info; | 370 | struct virtio_pci_vq_info *info; |
| 215 | struct virtqueue *vq; | 371 | struct virtqueue *vq; |
| 216 | unsigned long flags, size; | 372 | unsigned long flags, size; |
| 217 | u16 num; | 373 | u16 num, vector; |
| 218 | int err; | 374 | int err; |
| 219 | 375 | ||
| 220 | /* Select the queue we're interested in */ | 376 | /* Select the queue we're interested in */ |
| @@ -233,6 +389,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
| 233 | 389 | ||
| 234 | info->queue_index = index; | 390 | info->queue_index = index; |
| 235 | info->num = num; | 391 | info->num = num; |
| 392 | info->vector = VIRTIO_MSI_NO_VECTOR; | ||
| 236 | 393 | ||
| 237 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); | 394 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); |
| 238 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); | 395 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); |
| @@ -247,7 +404,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
| 247 | 404 | ||
| 248 | /* create the vring */ | 405 | /* create the vring */ |
| 249 | vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, | 406 | vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, |
| 250 | vdev, info->queue, vp_notify, callback); | 407 | vdev, info->queue, vp_notify, callback, name); |
| 251 | if (!vq) { | 408 | if (!vq) { |
| 252 | err = -ENOMEM; | 409 | err = -ENOMEM; |
| 253 | goto out_activate_queue; | 410 | goto out_activate_queue; |
| @@ -256,12 +413,43 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
| 256 | vq->priv = info; | 413 | vq->priv = info; |
| 257 | info->vq = vq; | 414 | info->vq = vq; |
| 258 | 415 | ||
| 416 | /* allocate per-vq vector if available and necessary */ | ||
| 417 | if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) { | ||
| 418 | vector = vp_dev->msix_used_vectors; | ||
| 419 | snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names, | ||
| 420 | "%s-%s", dev_name(&vp_dev->vdev.dev), name); | ||
| 421 | err = request_irq(vp_dev->msix_entries[vector].vector, | ||
| 422 | vring_interrupt, 0, | ||
| 423 | vp_dev->msix_names[vector], vq); | ||
| 424 | if (err) | ||
| 425 | goto out_request_irq; | ||
| 426 | info->vector = vector; | ||
| 427 | ++vp_dev->msix_used_vectors; | ||
| 428 | } else | ||
| 429 | vector = VP_MSIX_VQ_VECTOR; | ||
| 430 | |||
| 431 | if (callback && vp_dev->msix_enabled) { | ||
| 432 | iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | ||
| 433 | vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | ||
| 434 | if (vector == VIRTIO_MSI_NO_VECTOR) { | ||
| 435 | err = -EBUSY; | ||
| 436 | goto out_assign; | ||
| 437 | } | ||
| 438 | } | ||
| 439 | |||
| 259 | spin_lock_irqsave(&vp_dev->lock, flags); | 440 | spin_lock_irqsave(&vp_dev->lock, flags); |
| 260 | list_add(&info->node, &vp_dev->virtqueues); | 441 | list_add(&info->node, &vp_dev->virtqueues); |
| 261 | spin_unlock_irqrestore(&vp_dev->lock, flags); | 442 | spin_unlock_irqrestore(&vp_dev->lock, flags); |
| 262 | 443 | ||
| 263 | return vq; | 444 | return vq; |
| 264 | 445 | ||
| 446 | out_assign: | ||
| 447 | if (info->vector != VIRTIO_MSI_NO_VECTOR) { | ||
| 448 | free_irq(vp_dev->msix_entries[info->vector].vector, vq); | ||
| 449 | --vp_dev->msix_used_vectors; | ||
| 450 | } | ||
| 451 | out_request_irq: | ||
| 452 | vring_del_virtqueue(vq); | ||
| 265 | out_activate_queue: | 453 | out_activate_queue: |
| 266 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | 454 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); |
| 267 | free_pages_exact(info->queue, size); | 455 | free_pages_exact(info->queue, size); |
| @@ -270,21 +458,27 @@ out_info: | |||
| 270 | return ERR_PTR(err); | 458 | return ERR_PTR(err); |
| 271 | } | 459 | } |
| 272 | 460 | ||
| 273 | /* the config->del_vq() implementation */ | ||
| 274 | static void vp_del_vq(struct virtqueue *vq) | 461 | static void vp_del_vq(struct virtqueue *vq) |
| 275 | { | 462 | { |
| 276 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); | 463 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); |
| 277 | struct virtio_pci_vq_info *info = vq->priv; | 464 | struct virtio_pci_vq_info *info = vq->priv; |
| 278 | unsigned long flags, size; | 465 | unsigned long size; |
| 279 | 466 | ||
| 280 | spin_lock_irqsave(&vp_dev->lock, flags); | 467 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); |
| 281 | list_del(&info->node); | 468 | |
| 282 | spin_unlock_irqrestore(&vp_dev->lock, flags); | 469 | if (info->vector != VIRTIO_MSI_NO_VECTOR) |
| 470 | free_irq(vp_dev->msix_entries[info->vector].vector, vq); | ||
| 471 | |||
| 472 | if (vp_dev->msix_enabled) { | ||
| 473 | iowrite16(VIRTIO_MSI_NO_VECTOR, | ||
| 474 | vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | ||
| 475 | /* Flush the write out to device */ | ||
| 476 | ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); | ||
| 477 | } | ||
| 283 | 478 | ||
| 284 | vring_del_virtqueue(vq); | 479 | vring_del_virtqueue(vq); |
| 285 | 480 | ||
| 286 | /* Select and deactivate the queue */ | 481 | /* Select and deactivate the queue */ |
| 287 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); | ||
| 288 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | 482 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); |
| 289 | 483 | ||
| 290 | size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); | 484 | size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); |
| @@ -292,14 +486,57 @@ static void vp_del_vq(struct virtqueue *vq) | |||
| 292 | kfree(info); | 486 | kfree(info); |
| 293 | } | 487 | } |
| 294 | 488 | ||
| 489 | /* the config->del_vqs() implementation */ | ||
| 490 | static void vp_del_vqs(struct virtio_device *vdev) | ||
| 491 | { | ||
| 492 | struct virtqueue *vq, *n; | ||
| 493 | |||
| 494 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) | ||
| 495 | vp_del_vq(vq); | ||
| 496 | |||
| 497 | vp_free_vectors(vdev); | ||
| 498 | } | ||
| 499 | |||
| 500 | /* the config->find_vqs() implementation */ | ||
| 501 | static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, | ||
| 502 | struct virtqueue *vqs[], | ||
| 503 | vq_callback_t *callbacks[], | ||
| 504 | const char *names[]) | ||
| 505 | { | ||
| 506 | int vectors = 0; | ||
| 507 | int i, err; | ||
| 508 | |||
| 509 | /* How many vectors would we like? */ | ||
| 510 | for (i = 0; i < nvqs; ++i) | ||
| 511 | if (callbacks[i]) | ||
| 512 | ++vectors; | ||
| 513 | |||
| 514 | err = vp_request_vectors(vdev, vectors); | ||
| 515 | if (err) | ||
| 516 | goto error_request; | ||
| 517 | |||
| 518 | for (i = 0; i < nvqs; ++i) { | ||
| 519 | vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]); | ||
| 520 | if (IS_ERR(vqs[i])) | ||
| 521 | goto error_find; | ||
| 522 | } | ||
| 523 | return 0; | ||
| 524 | |||
| 525 | error_find: | ||
| 526 | vp_del_vqs(vdev); | ||
| 527 | |||
| 528 | error_request: | ||
| 529 | return PTR_ERR(vqs[i]); | ||
| 530 | } | ||
| 531 | |||
| 295 | static struct virtio_config_ops virtio_pci_config_ops = { | 532 | static struct virtio_config_ops virtio_pci_config_ops = { |
| 296 | .get = vp_get, | 533 | .get = vp_get, |
| 297 | .set = vp_set, | 534 | .set = vp_set, |
| 298 | .get_status = vp_get_status, | 535 | .get_status = vp_get_status, |
| 299 | .set_status = vp_set_status, | 536 | .set_status = vp_set_status, |
| 300 | .reset = vp_reset, | 537 | .reset = vp_reset, |
| 301 | .find_vq = vp_find_vq, | 538 | .find_vqs = vp_find_vqs, |
| 302 | .del_vq = vp_del_vq, | 539 | .del_vqs = vp_del_vqs, |
| 303 | .get_features = vp_get_features, | 540 | .get_features = vp_get_features, |
| 304 | .finalize_features = vp_finalize_features, | 541 | .finalize_features = vp_finalize_features, |
| 305 | }; | 542 | }; |
| @@ -310,7 +547,7 @@ static void virtio_pci_release_dev(struct device *_d) | |||
| 310 | struct virtio_pci_device *vp_dev = to_vp_device(dev); | 547 | struct virtio_pci_device *vp_dev = to_vp_device(dev); |
| 311 | struct pci_dev *pci_dev = vp_dev->pci_dev; | 548 | struct pci_dev *pci_dev = vp_dev->pci_dev; |
| 312 | 549 | ||
| 313 | free_irq(pci_dev->irq, vp_dev); | 550 | vp_del_vqs(dev); |
| 314 | pci_set_drvdata(pci_dev, NULL); | 551 | pci_set_drvdata(pci_dev, NULL); |
| 315 | pci_iounmap(pci_dev, vp_dev->ioaddr); | 552 | pci_iounmap(pci_dev, vp_dev->ioaddr); |
| 316 | pci_release_regions(pci_dev); | 553 | pci_release_regions(pci_dev); |
| @@ -369,21 +606,13 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, | |||
| 369 | vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; | 606 | vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; |
| 370 | vp_dev->vdev.id.device = pci_dev->subsystem_device; | 607 | vp_dev->vdev.id.device = pci_dev->subsystem_device; |
| 371 | 608 | ||
| 372 | /* register a handler for the queue with the PCI device's interrupt */ | ||
| 373 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, | ||
| 374 | dev_name(&vp_dev->vdev.dev), vp_dev); | ||
| 375 | if (err) | ||
| 376 | goto out_set_drvdata; | ||
| 377 | |||
| 378 | /* finally register the virtio device */ | 609 | /* finally register the virtio device */ |
| 379 | err = register_virtio_device(&vp_dev->vdev); | 610 | err = register_virtio_device(&vp_dev->vdev); |
| 380 | if (err) | 611 | if (err) |
| 381 | goto out_req_irq; | 612 | goto out_set_drvdata; |
| 382 | 613 | ||
| 383 | return 0; | 614 | return 0; |
| 384 | 615 | ||
| 385 | out_req_irq: | ||
| 386 | free_irq(pci_dev->irq, vp_dev); | ||
| 387 | out_set_drvdata: | 616 | out_set_drvdata: |
| 388 | pci_set_drvdata(pci_dev, NULL); | 617 | pci_set_drvdata(pci_dev, NULL); |
| 389 | pci_iounmap(pci_dev, vp_dev->ioaddr); | 618 | pci_iounmap(pci_dev, vp_dev->ioaddr); |
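
Both the configuration vector and each per-queue vector in the virtio_pci hunks above are programmed with the same handshake: write the chosen MSI-X vector index to the device, read the register back, and treat VIRTIO_MSI_NO_VECTOR as "the device could not allocate resources for this vector". Condensed into a hypothetical helper (the function name is invented for illustration):

/* Route the currently selected queue to MSI-X vector 'v'.  The device
 * echoes the vector back, or VIRTIO_MSI_NO_VECTOR on failure. */
static int example_assign_queue_vector(struct virtio_pci_device *vp_dev, u16 v)
{
        iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
        if (ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR) ==
            VIRTIO_MSI_NO_VECTOR)
                return -EBUSY;
        return 0;
}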
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c52369ab9bb..a882f2606515 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
| @@ -23,21 +23,30 @@ | |||
| 23 | 23 | ||
| 24 | #ifdef DEBUG | 24 | #ifdef DEBUG |
| 25 | /* For development, we want to crash whenever the ring is screwed. */ | 25 | /* For development, we want to crash whenever the ring is screwed. */ |
| 26 | #define BAD_RING(_vq, fmt...) \ | 26 | #define BAD_RING(_vq, fmt, args...) \ |
| 27 | do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0) | 27 | do { \ |
| 28 | dev_err(&(_vq)->vq.vdev->dev, \ | ||
| 29 | "%s:"fmt, (_vq)->vq.name, ##args); \ | ||
| 30 | BUG(); \ | ||
| 31 | } while (0) | ||
| 28 | /* Caller is supposed to guarantee no reentry. */ | 32 | /* Caller is supposed to guarantee no reentry. */ |
| 29 | #define START_USE(_vq) \ | 33 | #define START_USE(_vq) \ |
| 30 | do { \ | 34 | do { \ |
| 31 | if ((_vq)->in_use) \ | 35 | if ((_vq)->in_use) \ |
| 32 | panic("in_use = %i\n", (_vq)->in_use); \ | 36 | panic("%s:in_use = %i\n", \ |
| 37 | (_vq)->vq.name, (_vq)->in_use); \ | ||
| 33 | (_vq)->in_use = __LINE__; \ | 38 | (_vq)->in_use = __LINE__; \ |
| 34 | mb(); \ | 39 | mb(); \ |
| 35 | } while(0) | 40 | } while (0) |
| 36 | #define END_USE(_vq) \ | 41 | #define END_USE(_vq) \ |
| 37 | do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) | 42 | do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) |
| 38 | #else | 43 | #else |
| 39 | #define BAD_RING(_vq, fmt...) \ | 44 | #define BAD_RING(_vq, fmt, args...) \ |
| 40 | do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0) | 45 | do { \ |
| 46 | dev_err(&_vq->vq.vdev->dev, \ | ||
| 47 | "%s:"fmt, (_vq)->vq.name, ##args); \ | ||
| 48 | (_vq)->broken = true; \ | ||
| 49 | } while (0) | ||
| 41 | #define START_USE(vq) | 50 | #define START_USE(vq) |
| 42 | #define END_USE(vq) | 51 | #define END_USE(vq) |
| 43 | #endif | 52 | #endif |
| @@ -52,6 +61,9 @@ struct vring_virtqueue | |||
| 52 | /* Other side has made a mess, don't try any more. */ | 61 | /* Other side has made a mess, don't try any more. */ |
| 53 | bool broken; | 62 | bool broken; |
| 54 | 63 | ||
| 64 | /* Host supports indirect buffers */ | ||
| 65 | bool indirect; | ||
| 66 | |||
| 55 | /* Number of free buffers */ | 67 | /* Number of free buffers */ |
| 56 | unsigned int num_free; | 68 | unsigned int num_free; |
| 57 | /* Head of free buffer list. */ | 69 | /* Head of free buffer list. */ |
| @@ -76,6 +88,55 @@ struct vring_virtqueue | |||
| 76 | 88 | ||
| 77 | #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) | 89 | #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) |
| 78 | 90 | ||
| 91 | /* Set up an indirect table of descriptors and add it to the queue. */ | ||
| 92 | static int vring_add_indirect(struct vring_virtqueue *vq, | ||
| 93 | struct scatterlist sg[], | ||
| 94 | unsigned int out, | ||
| 95 | unsigned int in) | ||
| 96 | { | ||
| 97 | struct vring_desc *desc; | ||
| 98 | unsigned head; | ||
| 99 | int i; | ||
| 100 | |||
| 101 | desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC); | ||
| 102 | if (!desc) | ||
| 103 | return vq->vring.num; | ||
| 104 | |||
| 105 | /* Transfer entries from the sg list into the indirect page */ | ||
| 106 | for (i = 0; i < out; i++) { | ||
| 107 | desc[i].flags = VRING_DESC_F_NEXT; | ||
| 108 | desc[i].addr = sg_phys(sg); | ||
| 109 | desc[i].len = sg->length; | ||
| 110 | desc[i].next = i+1; | ||
| 111 | sg++; | ||
| 112 | } | ||
| 113 | for (; i < (out + in); i++) { | ||
| 114 | desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; | ||
| 115 | desc[i].addr = sg_phys(sg); | ||
| 116 | desc[i].len = sg->length; | ||
| 117 | desc[i].next = i+1; | ||
| 118 | sg++; | ||
| 119 | } | ||
| 120 | |||
| 121 | /* Last one doesn't continue. */ | ||
| 122 | desc[i-1].flags &= ~VRING_DESC_F_NEXT; | ||
| 123 | desc[i-1].next = 0; | ||
| 124 | |||
| 125 | /* We're about to use a buffer */ | ||
| 126 | vq->num_free--; | ||
| 127 | |||
| 128 | /* Use a single buffer which doesn't continue */ | ||
| 129 | head = vq->free_head; | ||
| 130 | vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; | ||
| 131 | vq->vring.desc[head].addr = virt_to_phys(desc); | ||
| 132 | vq->vring.desc[head].len = i * sizeof(struct vring_desc); | ||
| 133 | |||
| 134 | /* Update free pointer */ | ||
| 135 | vq->free_head = vq->vring.desc[head].next; | ||
| 136 | |||
| 137 | return head; | ||
| 138 | } | ||
| 139 | |||
| 79 | static int vring_add_buf(struct virtqueue *_vq, | 140 | static int vring_add_buf(struct virtqueue *_vq, |
| 80 | struct scatterlist sg[], | 141 | struct scatterlist sg[], |
| 81 | unsigned int out, | 142 | unsigned int out, |
| @@ -85,12 +146,21 @@ static int vring_add_buf(struct virtqueue *_vq, | |||
| 85 | struct vring_virtqueue *vq = to_vvq(_vq); | 146 | struct vring_virtqueue *vq = to_vvq(_vq); |
| 86 | unsigned int i, avail, head, uninitialized_var(prev); | 147 | unsigned int i, avail, head, uninitialized_var(prev); |
| 87 | 148 | ||
| 149 | START_USE(vq); | ||
| 150 | |||
| 88 | BUG_ON(data == NULL); | 151 | BUG_ON(data == NULL); |
| 152 | |||
| 153 | /* If the host supports indirect descriptor tables, and we have multiple | ||
| 154 | * buffers, then go indirect. FIXME: tune this threshold */ | ||
| 155 | if (vq->indirect && (out + in) > 1 && vq->num_free) { | ||
| 156 | head = vring_add_indirect(vq, sg, out, in); | ||
| 157 | if (head != vq->vring.num) | ||
| 158 | goto add_head; | ||
| 159 | } | ||
| 160 | |||
| 89 | BUG_ON(out + in > vq->vring.num); | 161 | BUG_ON(out + in > vq->vring.num); |
| 90 | BUG_ON(out + in == 0); | 162 | BUG_ON(out + in == 0); |
| 91 | 163 | ||
| 92 | START_USE(vq); | ||
| 93 | |||
| 94 | if (vq->num_free < out + in) { | 164 | if (vq->num_free < out + in) { |
| 95 | pr_debug("Can't add buf len %i - avail = %i\n", | 165 | pr_debug("Can't add buf len %i - avail = %i\n", |
| 96 | out + in, vq->num_free); | 166 | out + in, vq->num_free); |
| @@ -127,6 +197,7 @@ static int vring_add_buf(struct virtqueue *_vq, | |||
| 127 | /* Update free pointer */ | 197 | /* Update free pointer */ |
| 128 | vq->free_head = i; | 198 | vq->free_head = i; |
| 129 | 199 | ||
| 200 | add_head: | ||
| 130 | /* Set token. */ | 201 | /* Set token. */ |
| 131 | vq->data[head] = data; | 202 | vq->data[head] = data; |
| 132 | 203 | ||
| @@ -170,6 +241,11 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) | |||
| 170 | 241 | ||
| 171 | /* Put back on free list: find end */ | 242 | /* Put back on free list: find end */ |
| 172 | i = head; | 243 | i = head; |
| 244 | |||
| 245 | /* Free the indirect table */ | ||
| 246 | if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) | ||
| 247 | kfree(phys_to_virt(vq->vring.desc[i].addr)); | ||
| 248 | |||
| 173 | while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { | 249 | while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { |
| 174 | i = vq->vring.desc[i].next; | 250 | i = vq->vring.desc[i].next; |
| 175 | vq->num_free++; | 251 | vq->num_free++; |
| @@ -284,7 +360,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, | |||
| 284 | struct virtio_device *vdev, | 360 | struct virtio_device *vdev, |
| 285 | void *pages, | 361 | void *pages, |
| 286 | void (*notify)(struct virtqueue *), | 362 | void (*notify)(struct virtqueue *), |
| 287 | void (*callback)(struct virtqueue *)) | 363 | void (*callback)(struct virtqueue *), |
| 364 | const char *name) | ||
| 288 | { | 365 | { |
| 289 | struct vring_virtqueue *vq; | 366 | struct vring_virtqueue *vq; |
| 290 | unsigned int i; | 367 | unsigned int i; |
| @@ -303,14 +380,18 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, | |||
| 303 | vq->vq.callback = callback; | 380 | vq->vq.callback = callback; |
| 304 | vq->vq.vdev = vdev; | 381 | vq->vq.vdev = vdev; |
| 305 | vq->vq.vq_ops = &vring_vq_ops; | 382 | vq->vq.vq_ops = &vring_vq_ops; |
| 383 | vq->vq.name = name; | ||
| 306 | vq->notify = notify; | 384 | vq->notify = notify; |
| 307 | vq->broken = false; | 385 | vq->broken = false; |
| 308 | vq->last_used_idx = 0; | 386 | vq->last_used_idx = 0; |
| 309 | vq->num_added = 0; | 387 | vq->num_added = 0; |
| 388 | list_add_tail(&vq->vq.list, &vdev->vqs); | ||
| 310 | #ifdef DEBUG | 389 | #ifdef DEBUG |
| 311 | vq->in_use = false; | 390 | vq->in_use = false; |
| 312 | #endif | 391 | #endif |
| 313 | 392 | ||
| 393 | vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); | ||
| 394 | |||
| 314 | /* No callback? Tell other side not to bother us. */ | 395 | /* No callback? Tell other side not to bother us. */ |
| 315 | if (!callback) | 396 | if (!callback) |
| 316 | vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; | 397 | vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; |
| @@ -327,6 +408,7 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue); | |||
| 327 | 408 | ||
| 328 | void vring_del_virtqueue(struct virtqueue *vq) | 409 | void vring_del_virtqueue(struct virtqueue *vq) |
| 329 | { | 410 | { |
| 411 | list_del(&vq->list); | ||
| 330 | kfree(to_vvq(vq)); | 412 | kfree(to_vvq(vq)); |
| 331 | } | 413 | } |
| 332 | EXPORT_SYMBOL_GPL(vring_del_virtqueue); | 414 | EXPORT_SYMBOL_GPL(vring_del_virtqueue); |
| @@ -338,6 +420,8 @@ void vring_transport_features(struct virtio_device *vdev) | |||
| 338 | 420 | ||
| 339 | for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { | 421 | for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { |
| 340 | switch (i) { | 422 | switch (i) { |
| 423 | case VIRTIO_RING_F_INDIRECT_DESC: | ||
| 424 | break; | ||
| 341 | default: | 425 | default: |
| 342 | /* We don't understand this bit. */ | 426 | /* We don't understand this bit. */ |
| 343 | clear_bit(i, vdev->features); | 427 | clear_bit(i, vdev->features); |
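
The virtio_ring.c changes also add indirect descriptor support: when VIRTIO_RING_F_INDIRECT_DESC is negotiated and a request has more than one scatterlist entry, vring_add_buf() packs the whole request into a kmalloc'ed descriptor table and charges only one slot against the ring. A worked example of what that buys (the request size is illustrative):

/* A request with out = 3, in = 1 scatterlist entries:
 *
 *   direct:   4 ring descriptors chained via VRING_DESC_F_NEXT;
 *             num_free drops by 4.
 *   indirect: 1 ring descriptor flagged VRING_DESC_F_INDIRECT whose addr
 *             points at a table of 4 struct vring_desc entries
 *             (len = 4 * sizeof(struct vring_desc)); num_free drops by 1.
 *
 * A 128-entry ring can therefore keep 128 such requests in flight instead
 * of 32, which is why detach_buf() above kfree()s the table when the
 * INDIRECT flag is set on the head descriptor.
 */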
