author	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 15:37:27 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 15:37:27 -0500
commit	bcf8a3dfcb274cf6654a19e12e244f3af8c0d355 (patch)
tree	f1d0e0f36c0575a9202750aff65ba17ce91bc437 /drivers
parent	61bd5e5683244a564ecfe31c73575ee0bc708ccc (diff)
parent	b6c96c0214138186f495e3ee73737c6fc5e4efa2 (diff)
Merge tag 'to-linus' of git://github.com/rustyrussell/linux
* tag 'to-linus' of git://github.com/rustyrussell/linux: (24 commits)
  lguest: Make sure interrupt is allocated ok by lguest_setup_irq
  lguest: move the lguest tool to the tools directory
  lguest: switch segment-voodoo-numbers to readable symbols
  virtio: balloon: Add freeze, restore handlers to support S4
  virtio: balloon: Move vq initialization into separate function
  virtio: net: Add freeze, restore handlers to support S4
  virtio: net: Move vq and vq buf removal into separate function
  virtio: net: Move vq initialization into separate function
  virtio: blk: Add freeze, restore handlers to support S4
  virtio: blk: Move vq initialization to separate function
  virtio: console: Disable callbacks for virtqueues at start of S4 freeze
  virtio: console: Add freeze and restore handlers to support S4
  virtio: console: Move vq and vq buf removal into separate functions
  virtio: pci: add PM notification handlers for restore, freeze, thaw, poweroff
  virtio: pci: switch to new PM API
  virtio_blk: fix config handler race
  virtio: add debugging if driver doesn't kick.
  virtio: expose added descriptors immediately.
  virtio: avoid modulus operation.
  virtio: support unlocked queue kick
  ...
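The API change threaded through most of the diffs below: virtqueue_add_buf() now takes a gfp_t argument, absorbing the old virtqueue_add_buf_gfp() variant. A minimal caller sketch (buffer and size names illustrative; this mirrors what virtio-rng does below):

	struct scatterlist sg;

	sg_init_one(&sg, buf, size);
	/* 0 out sgs, 1 in sg: the host writes into the buffer; may sleep */
	if (virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0)
		BUG();	/* negative return: no descriptors left in the ring */
	virtqueue_kick(vq);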
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/virtio_blk.c		|  87
-rw-r--r--	drivers/char/hw_random/virtio-rng.c	|   2
-rw-r--r--	drivers/char/virtio_console.c		| 140
-rw-r--r--	drivers/lguest/Makefile			|   2
-rw-r--r--	drivers/lguest/lguest_device.c		|  18
-rw-r--r--	drivers/lguest/segments.c		|  28
-rw-r--r--	drivers/net/virtio_net.c		| 125
-rw-r--r--	drivers/s390/kvm/kvm_virtio.c		|   2
-rw-r--r--	drivers/virtio/virtio_balloon.c		| 108
-rw-r--r--	drivers/virtio/virtio_mmio.c		|   4
-rw-r--r--	drivers/virtio/virtio_pci.c		| 110
-rw-r--r--	drivers/virtio/virtio_ring.c		| 245
12 files changed, 713 insertions(+), 158 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4d0b70adf5f7..ffd5ca919295 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -4,6 +4,7 @@
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/virtio.h>
 #include <linux/virtio_blk.h>
 #include <linux/scatterlist.h>
@@ -36,6 +37,12 @@ struct virtio_blk
 	/* Process context for config space updates */
 	struct work_struct config_work;
 
+	/* Lock for config space updates */
+	struct mutex config_lock;
+
+	/* enable config space updates */
+	bool config_enable;
+
 	/* What host tells us, plus 2 for header & tailer. */
 	unsigned int sg_elems;
 
@@ -172,7 +179,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}
 
-	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC) < 0) {
 		mempool_free(vbr, vblk->pool);
 		return false;
 	}
@@ -318,6 +325,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
 	char cap_str_2[10], cap_str_10[10];
 	u64 capacity, size;
 
+	mutex_lock(&vblk->config_lock);
+	if (!vblk->config_enable)
+		goto done;
+
 	/* Host must always specify the capacity. */
 	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
 			  &capacity, sizeof(capacity));
@@ -340,6 +351,8 @@ static void virtblk_config_changed_work(struct work_struct *work)
 		  cap_str_10, cap_str_2);
 
 	set_capacity(vblk->disk, capacity);
+done:
+	mutex_unlock(&vblk->config_lock);
 }
 
 static void virtblk_config_changed(struct virtio_device *vdev)
@@ -349,6 +362,18 @@ static void virtblk_config_changed(struct virtio_device *vdev)
 	queue_work(virtblk_wq, &vblk->config_work);
 }
 
+static int init_vq(struct virtio_blk *vblk)
+{
+	int err = 0;
+
+	/* We expect one virtqueue, for output. */
+	vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests");
+	if (IS_ERR(vblk->vq))
+		err = PTR_ERR(vblk->vq);
+
+	return err;
+}
+
 static int __devinit virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
@@ -388,14 +413,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	vblk->vdev = vdev;
 	vblk->sg_elems = sg_elems;
 	sg_init_table(vblk->sg, vblk->sg_elems);
+	mutex_init(&vblk->config_lock);
 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
+	vblk->config_enable = true;
 
-	/* We expect one virtqueue, for output. */
-	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
-	if (IS_ERR(vblk->vq)) {
-		err = PTR_ERR(vblk->vq);
+	err = init_vq(vblk);
+	if (err)
 		goto out_free_vblk;
-	}
 
 	vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
 	if (!vblk->pool) {
@@ -542,7 +566,10 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
 	struct virtio_blk *vblk = vdev->priv;
 	int index = vblk->index;
 
-	flush_work(&vblk->config_work);
+	/* Prevent config work handler from accessing the device. */
+	mutex_lock(&vblk->config_lock);
+	vblk->config_enable = false;
+	mutex_unlock(&vblk->config_lock);
 
 	/* Nothing should be pending. */
 	BUG_ON(!list_empty(&vblk->reqs));
@@ -550,6 +577,8 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
+	flush_work(&vblk->config_work);
+
 	del_gendisk(vblk->disk);
 	blk_cleanup_queue(vblk->disk->queue);
 	put_disk(vblk->disk);
@@ -559,6 +588,46 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
 	ida_simple_remove(&vd_index_ida, index);
 }
 
+#ifdef CONFIG_PM
+static int virtblk_freeze(struct virtio_device *vdev)
+{
+	struct virtio_blk *vblk = vdev->priv;
+
+	/* Ensure we don't receive any more interrupts */
+	vdev->config->reset(vdev);
+
+	/* Prevent config work handler from accessing the device. */
+	mutex_lock(&vblk->config_lock);
+	vblk->config_enable = false;
+	mutex_unlock(&vblk->config_lock);
+
+	flush_work(&vblk->config_work);
+
+	spin_lock_irq(vblk->disk->queue->queue_lock);
+	blk_stop_queue(vblk->disk->queue);
+	spin_unlock_irq(vblk->disk->queue->queue_lock);
+	blk_sync_queue(vblk->disk->queue);
+
+	vdev->config->del_vqs(vdev);
+	return 0;
+}
+
+static int virtblk_restore(struct virtio_device *vdev)
+{
+	struct virtio_blk *vblk = vdev->priv;
+	int ret;
+
+	vblk->config_enable = true;
+	ret = init_vq(vdev->priv);
+	if (!ret) {
+		spin_lock_irq(vblk->disk->queue->queue_lock);
+		blk_start_queue(vblk->disk->queue);
+		spin_unlock_irq(vblk->disk->queue->queue_lock);
+	}
+	return ret;
+}
+#endif
+
 static const struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
 	{ 0 },
@@ -584,6 +653,10 @@ static struct virtio_driver __refdata virtio_blk = {
 	.probe = virtblk_probe,
 	.remove = __devexit_p(virtblk_remove),
 	.config_changed = virtblk_config_changed,
+#ifdef CONFIG_PM
+	.freeze = virtblk_freeze,
+	.restore = virtblk_restore,
+#endif
 };
 
 static int __init init(void)
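The reordering in virtblk_remove() plus the config_enable flag is the 'virtio_blk: fix config handler race' fix: config work is fenced off before the device is reset, and the worker is flushed only afterwards. Condensed from the handlers above (a sketch, not additional code):

	/* 1. Stop future config work from touching the device. */
	mutex_lock(&vblk->config_lock);
	vblk->config_enable = false;
	mutex_unlock(&vblk->config_lock);

	/* 2. Reset: the host raises no further config interrupts. */
	vdev->config->reset(vdev);

	/* 3. A worker queued earlier now runs harmlessly: it re-checks
	 *    config_enable under the lock and bails out. */
	flush_work(&vblk->config_work);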
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index fd699ccecf5b..723725bbb96b 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -47,7 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
 	sg_init_one(&sg, buf, size);
 
 	/* There should always be room for one buffer. */
-	if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
+	if (virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0)
 		BUG();
 
 	virtqueue_kick(vq);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8e3c46d67cb3..b58b56187065 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -392,7 +392,7 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 
 	sg_init_one(sg, buf->buf, buf->size);
 
-	ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+	ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
 	virtqueue_kick(vq);
 	return ret;
 }
@@ -457,7 +457,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
 	vq = portdev->c_ovq;
 
 	sg_init_one(sg, &cpkt, sizeof(cpkt));
-	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
 		virtqueue_kick(vq);
 		while (!virtqueue_get_buf(vq, &len))
 			cpu_relax();
@@ -506,7 +506,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
 	reclaim_consumed_buffers(port);
 
 	sg_init_one(sg, in_buf, in_count);
-	ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
+	ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf, GFP_ATOMIC);
 
 	/* Tell Host to go! */
 	virtqueue_kick(out_vq);
@@ -1271,6 +1271,20 @@ static void remove_port(struct kref *kref)
 	kfree(port);
 }
 
+static void remove_port_data(struct port *port)
+{
+	struct port_buffer *buf;
+
+	/* Remove unused data this port might have received. */
+	discard_port_data(port);
+
+	reclaim_consumed_buffers(port);
+
+	/* Remove buffers we queued up for the Host to send us data in. */
+	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+		free_buf(buf);
+}
+
 /*
  * Port got unplugged. Remove port from portdev's list and drop the
  * kref reference. If no userspace has this port opened, it will
@@ -1278,8 +1292,6 @@ static void remove_port(struct kref *kref)
 */
 static void unplug_port(struct port *port)
 {
-	struct port_buffer *buf;
-
 	spin_lock_irq(&port->portdev->ports_lock);
 	list_del(&port->list);
 	spin_unlock_irq(&port->portdev->ports_lock);
@@ -1300,14 +1312,7 @@ static void unplug_port(struct port *port)
 		hvc_remove(port->cons.hvc);
 	}
 
-	/* Remove unused data this port might have received. */
-	discard_port_data(port);
-
-	reclaim_consumed_buffers(port);
-
-	/* Remove buffers we queued up for the Host to send us data in. */
-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf);
+	remove_port_data(port);
 
 	/*
 	 * We should just assume the device itself has gone off --
@@ -1659,6 +1664,28 @@ static const struct file_operations portdev_fops = {
 	.owner = THIS_MODULE,
 };
 
+static void remove_vqs(struct ports_device *portdev)
+{
+	portdev->vdev->config->del_vqs(portdev->vdev);
+	kfree(portdev->in_vqs);
+	kfree(portdev->out_vqs);
+}
+
+static void remove_controlq_data(struct ports_device *portdev)
+{
+	struct port_buffer *buf;
+	unsigned int len;
+
+	if (!use_multiport(portdev))
+		return;
+
+	while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
+		free_buf(buf);
+
+	while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
+		free_buf(buf);
+}
+
 /*
  * Once we're further in boot, we get probed like any other virtio
  * device.
@@ -1764,9 +1791,7 @@ free_vqs:
 	/* The host might want to notify mgmt sw about device add failure */
 	__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
 			   VIRTIO_CONSOLE_DEVICE_READY, 0);
-	vdev->config->del_vqs(vdev);
-	kfree(portdev->in_vqs);
-	kfree(portdev->out_vqs);
+	remove_vqs(portdev);
 free_chrdev:
 	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
 free:
@@ -1804,21 +1829,8 @@ static void virtcons_remove(struct virtio_device *vdev)
 	 * have to just stop using the port, as the vqs are going
 	 * away.
 	 */
-	if (use_multiport(portdev)) {
-		struct port_buffer *buf;
-		unsigned int len;
-
-		while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
-			free_buf(buf);
-
-		while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
-			free_buf(buf);
-	}
-
-	vdev->config->del_vqs(vdev);
-	kfree(portdev->in_vqs);
-	kfree(portdev->out_vqs);
-
+	remove_controlq_data(portdev);
+	remove_vqs(portdev);
 	kfree(portdev);
 }
 
@@ -1832,6 +1844,68 @@ static unsigned int features[] = {
 	VIRTIO_CONSOLE_F_MULTIPORT,
 };
 
+#ifdef CONFIG_PM
+static int virtcons_freeze(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+	struct port *port;
+
+	portdev = vdev->priv;
+
+	vdev->config->reset(vdev);
+
+	virtqueue_disable_cb(portdev->c_ivq);
+	cancel_work_sync(&portdev->control_work);
+	/*
+	 * Once more: if control_work_handler() was running, it would
+	 * enable the cb as the last step.
+	 */
+	virtqueue_disable_cb(portdev->c_ivq);
+	remove_controlq_data(portdev);
+
+	list_for_each_entry(port, &portdev->ports, list) {
+		virtqueue_disable_cb(port->in_vq);
+		virtqueue_disable_cb(port->out_vq);
+		/*
+		 * We'll ask the host later if the new invocation has
+		 * the port opened or closed.
+		 */
+		port->host_connected = false;
+		remove_port_data(port);
+	}
+	remove_vqs(portdev);
+
+	return 0;
+}
+
+static int virtcons_restore(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+	struct port *port;
+	int ret;
+
+	portdev = vdev->priv;
+
+	ret = init_vqs(portdev);
+	if (ret)
+		return ret;
+
+	if (use_multiport(portdev))
+		fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+
+	list_for_each_entry(port, &portdev->ports, list) {
+		port->in_vq = portdev->in_vqs[port->id];
+		port->out_vq = portdev->out_vqs[port->id];
+
+		fill_queue(port->in_vq, &port->inbuf_lock);
+
+		/* Get port open/close status on the host */
+		send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+	}
+	return 0;
+}
+#endif
+
 static struct virtio_driver virtio_console = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
@@ -1841,6 +1915,10 @@ static struct virtio_driver virtio_console = {
 	.probe = virtcons_probe,
 	.remove = virtcons_remove,
 	.config_changed = config_intr,
+#ifdef CONFIG_PM
+	.freeze = virtcons_freeze,
+	.restore = virtcons_restore,
+#endif
 };
 
 static int __init init(void)
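Note the double virtqueue_disable_cb() on c_ivq in virtcons_freeze() above: control_work_handler() re-enables the callback as its final step, so a handler that was mid-flight when freeze started would undo the first disable. Schematically:

	virtqueue_disable_cb(portdev->c_ivq);		/* stop new callbacks */
	cancel_work_sync(&portdev->control_work);	/* wait out a running handler */
	/* its last act may have been to re-enable the callback... */
	virtqueue_disable_cb(portdev->c_ivq);		/* ...so disable it again */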
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
index 8ac947c7e7c7..c4197503900e 100644
--- a/drivers/lguest/Makefile
+++ b/drivers/lguest/Makefile
@@ -18,7 +18,7 @@ Mastery: PREFIX=M
 Beer:
 	@for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}"
 Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery:
-	@sh ../../Documentation/virtual/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
+	@sh ../../tools/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
 Puppy:
 	@clear
 	@printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n"
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 595d73197016..9e8388efd88e 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -241,7 +241,7 @@ static void lg_notify(struct virtqueue *vq)
 }
 
 /* An extern declaration inside a C file is bad form. Don't do it. */
-extern void lguest_setup_irq(unsigned int irq);
+extern int lguest_setup_irq(unsigned int irq);
 
 /*
  * This routine finds the Nth virtqueue described in the configuration of
@@ -292,17 +292,21 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
 
 	/*
 	 * OK, tell virtio_ring.c to set up a virtqueue now we know its size
-	 * and we've got a pointer to its pages.
+	 * and we've got a pointer to its pages. Note that we set weak_barriers
+	 * to 'true': the host just a(nother) SMP CPU, so we only need inter-cpu
+	 * barriers.
 	 */
-	vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
-				 vdev, lvq->pages, lg_notify, callback, name);
+	vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, vdev,
+				 true, lvq->pages, lg_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
 		goto unmap;
 	}
 
 	/* Make sure the interrupt is allocated. */
-	lguest_setup_irq(lvq->config.irq);
+	err = lguest_setup_irq(lvq->config.irq);
+	if (err)
+		goto destroy_vring;
 
 	/*
 	 * Tell the interrupt for this virtqueue to go to the virtio_ring
@@ -315,7 +319,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
 	err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
 			  dev_name(&vdev->dev), vq);
 	if (err)
-		goto destroy_vring;
+		goto free_desc;
 
 	/*
 	 * Last of all we hook up our 'struct lguest_vq_info" to the
@@ -324,6 +328,8 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
 	vq->priv = lvq;
 	return vq;
 
+free_desc:
+	irq_free_desc(lvq->config.irq);
 destroy_vring:
 	vring_del_virtqueue(vq);
 unmap:
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index ede46581351a..c4fb424dfddb 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -81,8 +81,8 @@ static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
 	 * sometimes careless and leaves this as 0, even though it's
 	 * running at privilege level 1. If so, we fix it here.
 	 */
-	if ((cpu->arch.gdt[i].b & 0x00006000) == 0)
-		cpu->arch.gdt[i].b |= (GUEST_PL << 13);
+	if (cpu->arch.gdt[i].dpl == 0)
+		cpu->arch.gdt[i].dpl |= GUEST_PL;
 
 	/*
 	 * Each descriptor has an "accessed" bit. If we don't set it
@@ -90,7 +90,7 @@ static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
 	 * that entry into a segment register. But the GDT isn't
 	 * writable by the Guest, so bad things can happen.
 	 */
-	cpu->arch.gdt[i].b |= 0x00000100;
+	cpu->arch.gdt[i].type |= 0x1;
 	}
 }
 
@@ -114,13 +114,19 @@ void setup_default_gdt_entries(struct lguest_ro_state *state)
 
 	/*
 	 * The TSS segment refers to the TSS entry for this particular CPU.
-	 * Forgive the magic flags: the 0x8900 means the entry is Present, it's
-	 * privilege level 0 Available 386 TSS system segment, and the 0x67
-	 * means Saturn is eclipsed by Mercury in the twelfth house.
 	 */
-	gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16);
-	gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000)
-		| ((tss >> 16) & 0x000000FF);
+	gdt[GDT_ENTRY_TSS].a = 0;
+	gdt[GDT_ENTRY_TSS].b = 0;
+
+	gdt[GDT_ENTRY_TSS].limit0 = 0x67;
+	gdt[GDT_ENTRY_TSS].base0 = tss & 0xFFFF;
+	gdt[GDT_ENTRY_TSS].base1 = (tss >> 16) & 0xFF;
+	gdt[GDT_ENTRY_TSS].base2 = tss >> 24;
+	gdt[GDT_ENTRY_TSS].type = 0x9; /* 32-bit TSS (available) */
+	gdt[GDT_ENTRY_TSS].p = 0x1; /* Entry is present */
+	gdt[GDT_ENTRY_TSS].dpl = 0x0; /* Privilege level 0 */
+	gdt[GDT_ENTRY_TSS].s = 0x0; /* system segment */
+
 }
 
 /*
@@ -135,8 +141,8 @@ void setup_guest_gdt(struct lg_cpu *cpu)
 	 */
 	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
 	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
-	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
-	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;
 }
 
 /*H:650
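The segments.c conversion ('lguest: switch segment-voodoo-numbers to readable symbols') swaps raw descriptor arithmetic for the named bitfields of x86's struct desc_struct; both forms touch the same bits. For the DPL case, which occupies bits 13-14 of the descriptor's high word, the equivalence is:

	/* before: mask and shift the raw high word 'b' */
	if ((cpu->arch.gdt[i].b & 0x00006000) == 0)	/* DPL == 0? */
		cpu->arch.gdt[i].b |= (GUEST_PL << 13);	/* DPL = GUEST_PL */

	/* after: the bitfield names those two bits directly */
	if (cpu->arch.gdt[i].dpl == 0)
		cpu->arch.gdt[i].dpl |= GUEST_PL;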
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 76fe14efb2b5..4880aa8b4c28 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -370,7 +370,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 
 	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
 
-	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
+	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
 	if (err < 0)
 		dev_kfree_skb(skb);
 
@@ -415,8 +415,8 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
 
 	/* chain first in list head */
 	first->private = (unsigned long)list;
-	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
-				    first, gfp);
+	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+				first, gfp);
 	if (err < 0)
 		give_pages(vi, first);
 
@@ -434,7 +434,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 
 	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
 
-	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
+	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
 	if (err < 0)
 		give_pages(vi, page);
 
@@ -609,7 +609,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 
 	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
 	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
-				 0, skb);
+				 0, skb, GFP_ATOMIC);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -767,7 +767,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
 	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
 
-	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
+	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
 
 	virtqueue_kick(vi->cvq);
 
@@ -985,15 +985,38 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 	virtnet_update_status(vi);
 }
 
+static int init_vqs(struct virtnet_info *vi)
+{
+	struct virtqueue *vqs[3];
+	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
+	const char *names[] = { "input", "output", "control" };
+	int nvqs, err;
+
+	/* We expect two virtqueues, receive then send,
+	 * and optionally control. */
+	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
+
+	err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
+	if (err)
+		return err;
+
+	vi->rvq = vqs[0];
+	vi->svq = vqs[1];
+
+	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
+		vi->cvq = vqs[2];
+
+		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
+			vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
+	}
+	return 0;
+}
+
 static int virtnet_probe(struct virtio_device *vdev)
 {
 	int err;
 	struct net_device *dev;
 	struct virtnet_info *vi;
-	struct virtqueue *vqs[3];
-	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
-	const char *names[] = { "input", "output", "control" };
-	int nvqs;
 
 	/* Allocate ourselves a network device with room for our info */
 	dev = alloc_etherdev(sizeof(struct virtnet_info));
@@ -1065,24 +1088,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
 		vi->mergeable_rx_bufs = true;
 
-	/* We expect two virtqueues, receive then send,
-	 * and optionally control. */
-	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
-
-	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
+	err = init_vqs(vi);
 	if (err)
 		goto free_stats;
 
-	vi->rvq = vqs[0];
-	vi->svq = vqs[1];
-
-	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
-		vi->cvq = vqs[2];
-
-		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
-			dev->features |= NETIF_F_HW_VLAN_FILTER;
-	}
-
 	err = register_netdev(dev);
 	if (err) {
 		pr_debug("virtio_net: registering device failed\n");
@@ -1144,27 +1153,73 @@ static void free_unused_bufs(struct virtnet_info *vi)
 	BUG_ON(vi->num != 0);
 }
 
-static void __devexit virtnet_remove(struct virtio_device *vdev)
+static void remove_vq_common(struct virtnet_info *vi)
 {
-	struct virtnet_info *vi = vdev->priv;
-
-	/* Stop all the virtqueues. */
-	vdev->config->reset(vdev);
-
-	unregister_netdev(vi->dev);
+	vi->vdev->config->reset(vi->vdev);
 
 	/* Free unused buffers in both send and recv, if any. */
 	free_unused_bufs(vi);
 
-	vdev->config->del_vqs(vi->vdev);
+	vi->vdev->config->del_vqs(vi->vdev);
 
 	while (vi->pages)
 		__free_pages(get_a_page(vi, GFP_KERNEL), 0);
+}
+
+static void __devexit virtnet_remove(struct virtio_device *vdev)
+{
+	struct virtnet_info *vi = vdev->priv;
+
+	unregister_netdev(vi->dev);
+
+	remove_vq_common(vi);
 
 	free_percpu(vi->stats);
 	free_netdev(vi->dev);
 }
 
+#ifdef CONFIG_PM
+static int virtnet_freeze(struct virtio_device *vdev)
+{
+	struct virtnet_info *vi = vdev->priv;
+
+	virtqueue_disable_cb(vi->rvq);
+	virtqueue_disable_cb(vi->svq);
+	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
+		virtqueue_disable_cb(vi->cvq);
+
+	netif_device_detach(vi->dev);
+	cancel_delayed_work_sync(&vi->refill);
+
+	if (netif_running(vi->dev))
+		napi_disable(&vi->napi);
+
+	remove_vq_common(vi);
+
+	return 0;
+}
+
+static int virtnet_restore(struct virtio_device *vdev)
+{
+	struct virtnet_info *vi = vdev->priv;
+	int err;
+
+	err = init_vqs(vi);
+	if (err)
+		return err;
+
+	if (netif_running(vi->dev))
+		virtnet_napi_enable(vi);
+
+	netif_device_attach(vi->dev);
+
+	if (!try_fill_recv(vi, GFP_KERNEL))
+		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
+	return 0;
+}
+#endif
+
 static struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 	{ 0 },
@@ -1189,6 +1244,10 @@ static struct virtio_driver virtio_net_driver = {
 	.probe = virtnet_probe,
 	.remove = __devexit_p(virtnet_remove),
 	.config_changed = virtnet_config_changed,
+#ifdef CONFIG_PM
+	.freeze = virtnet_freeze,
+	.restore = virtnet_restore,
+#endif
 };
 
 static int __init init(void)
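init_vqs() above exists so that probe and the new restore path share one setup routine; it is the standard find_vqs() idiom of one callback and one name per ring, optional control ring last. A stripped-down sketch of that idiom for a hypothetical two-ring device (handler and variable names invented for illustration):

	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { rx_done, tx_done };	/* hypothetical */
	const char *names[] = { "input", "output" };
	int err;

	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
	if (err)
		return err;
	rvq = vqs[0];	/* receive */
	svq = vqs[1];	/* send */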
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 8af868bab20b..7bc1955337ea 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -198,7 +198,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
 		goto out;
 
 	vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
-				 vdev, (void *) config->address,
+				 vdev, true, (void *) config->address,
 				 kvm_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 94fd738a7741..95aeedf198f8 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -1,4 +1,5 @@
-/* Virtio balloon implementation, inspired by Dor Loar and Marcelo
+/*
+ * Virtio balloon implementation, inspired by Dor Laor and Marcelo
  * Tosatti's implementations.
  *
  * Copyright 2008 Rusty Russell IBM Corporation
@@ -17,7 +18,7 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
-//#define DEBUG
+
 #include <linux/virtio.h>
 #include <linux/virtio_balloon.h>
 #include <linux/swap.h>
@@ -87,7 +88,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 	init_completion(&vb->acked);
 
 	/* We should always be able to add one buffer to an empty queue. */
-	if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
+	if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
 		BUG();
 	virtqueue_kick(vq);
 
@@ -149,7 +150,6 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
 		vb->num_pages--;
 	}
 
-
 	/*
 	 * Note that if
 	 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
@@ -220,7 +220,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
 
 	vq = vb->stats_vq;
 	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
-	if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
+	if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
 		BUG();
 	virtqueue_kick(vq);
 }
@@ -275,32 +275,21 @@ static int balloon(void *_vballoon)
 	return 0;
 }
 
-static int virtballoon_probe(struct virtio_device *vdev)
+static int init_vqs(struct virtio_balloon *vb)
 {
-	struct virtio_balloon *vb;
 	struct virtqueue *vqs[3];
 	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
 	const char *names[] = { "inflate", "deflate", "stats" };
 	int err, nvqs;
 
-	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
-	if (!vb) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	INIT_LIST_HEAD(&vb->pages);
-	vb->num_pages = 0;
-	init_waitqueue_head(&vb->config_change);
-	vb->vdev = vdev;
-	vb->need_stats_update = 0;
-
-	/* We expect two virtqueues: inflate and deflate,
-	 * and optionally stat. */
+	/*
+	 * We expect two virtqueues: inflate and deflate, and
+	 * optionally stat.
+	 */
 	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
-	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
+	err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names);
 	if (err)
-		goto out_free_vb;
+		return err;
 
 	vb->inflate_vq = vqs[0];
 	vb->deflate_vq = vqs[1];
@@ -313,10 +302,34 @@ static int virtballoon_probe(struct virtio_device *vdev)
 	 * use it to signal us later.
 	 */
 		sg_init_one(&sg, vb->stats, sizeof vb->stats);
-		if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
+		if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL)
+		    < 0)
 			BUG();
 		virtqueue_kick(vb->stats_vq);
 	}
+	return 0;
+}
+
+static int virtballoon_probe(struct virtio_device *vdev)
+{
+	struct virtio_balloon *vb;
+	int err;
+
+	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
+	if (!vb) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&vb->pages);
+	vb->num_pages = 0;
+	init_waitqueue_head(&vb->config_change);
+	vb->vdev = vdev;
+	vb->need_stats_update = 0;
+
+	err = init_vqs(vb);
+	if (err)
+		goto out_free_vb;
 
 	vb->thread = kthread_run(balloon, vb, "vballoon");
 	if (IS_ERR(vb->thread)) {
@@ -351,6 +364,48 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
 	kfree(vb);
 }
 
+#ifdef CONFIG_PM
+static int virtballoon_freeze(struct virtio_device *vdev)
+{
+	/*
+	 * The kthread is already frozen by the PM core before this
+	 * function is called.
+	 */
+
+	/* Ensure we don't get any more requests from the host */
+	vdev->config->reset(vdev);
+	vdev->config->del_vqs(vdev);
+	return 0;
+}
+
+static int virtballoon_thaw(struct virtio_device *vdev)
+{
+	return init_vqs(vdev->priv);
+}
+
+static int virtballoon_restore(struct virtio_device *vdev)
+{
+	struct virtio_balloon *vb = vdev->priv;
+	struct page *page, *page2;
+
+	/* We're starting from a clean slate */
+	vb->num_pages = 0;
+
+	/*
+	 * If a request wasn't complete at the time of freezing, this
+	 * could have been set.
+	 */
+	vb->need_stats_update = 0;
+
+	/* We don't have these pages in the balloon anymore! */
+	list_for_each_entry_safe(page, page2, &vb->pages, lru) {
+		list_del(&page->lru);
+		totalram_pages++;
+	}
+	return init_vqs(vdev->priv);
+}
+#endif
+
 static unsigned int features[] = {
 	VIRTIO_BALLOON_F_MUST_TELL_HOST,
 	VIRTIO_BALLOON_F_STATS_VQ,
@@ -365,6 +420,11 @@ static struct virtio_driver virtio_balloon_driver = {
 	.probe = virtballoon_probe,
 	.remove = __devexit_p(virtballoon_remove),
 	.config_changed = virtballoon_changed,
+#ifdef CONFIG_PM
+	.freeze = virtballoon_freeze,
+	.restore = virtballoon_restore,
+	.thaw = virtballoon_thaw,
+#endif
 };
 
 static int __init init(void)
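The balloon is the one driver in this series with a distinct .thaw handler. After a failed hibernation image write (thaw), guest memory was never replaced, so the ballooned-page list is still valid and only the vqs need rebuilding; after a real restore the list describes the previous kernel instance and must be dropped. Condensed from the handlers above:

	/* thaw: memory intact - just rebuild the rings */
	return init_vqs(vb);

	/* restore: booted from the image - forget stale ballooned pages */
	vb->num_pages = 0;
	list_for_each_entry_safe(page, page2, &vb->pages, lru) {
		list_del(&page->lru);
		totalram_pages++;	/* the pages are the guest's again */
	}
	return init_vqs(vb);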
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 0269717436af..01d6dc250d5c 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -310,8 +310,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
 
 	/* Create the vring */
-	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN,
-			vdev, info->queue, vm_notify, callback, name);
+	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+				 true, info->queue, vm_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
 		goto error_new_virtqueue;
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index baabb7937ec2..635e1efb3792 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -55,6 +55,10 @@ struct virtio_pci_device
 	unsigned msix_vectors;
 	/* Vectors allocated, excluding per-vq vectors if any */
 	unsigned msix_used_vectors;
+
+	/* Status saved during hibernate/restore */
+	u8 saved_status;
+
 	/* Whether we have vector per vq */
 	bool per_vq_vectors;
 };
@@ -414,8 +418,8 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
 		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 
 	/* create the vring */
-	vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
-				 vdev, info->queue, vp_notify, callback, name);
+	vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
+				 true, info->queue, vp_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
 		goto out_activate_queue;
@@ -716,19 +720,114 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
 }
 
 #ifdef CONFIG_PM
-static int virtio_pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int virtio_pci_suspend(struct device *dev)
 {
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+
 	pci_save_state(pci_dev);
 	pci_set_power_state(pci_dev, PCI_D3hot);
 	return 0;
 }
 
-static int virtio_pci_resume(struct pci_dev *pci_dev)
+static int virtio_pci_resume(struct device *dev)
 {
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+
 	pci_restore_state(pci_dev);
 	pci_set_power_state(pci_dev, PCI_D0);
 	return 0;
 }
+
+static int virtio_pci_freeze(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+	struct virtio_driver *drv;
+	int ret;
+
+	drv = container_of(vp_dev->vdev.dev.driver,
+			   struct virtio_driver, driver);
+
+	ret = 0;
+	vp_dev->saved_status = vp_get_status(&vp_dev->vdev);
+	if (drv && drv->freeze)
+		ret = drv->freeze(&vp_dev->vdev);
+
+	if (!ret)
+		pci_disable_device(pci_dev);
+	return ret;
+}
+
+static int restore_common(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+	int ret;
+
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		return ret;
+	pci_set_master(pci_dev);
+	vp_finalize_features(&vp_dev->vdev);
+
+	return ret;
+}
+
+static int virtio_pci_thaw(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+	struct virtio_driver *drv;
+	int ret;
+
+	ret = restore_common(dev);
+	if (ret)
+		return ret;
+
+	drv = container_of(vp_dev->vdev.dev.driver,
+			   struct virtio_driver, driver);
+
+	if (drv && drv->thaw)
+		ret = drv->thaw(&vp_dev->vdev);
+	else if (drv && drv->restore)
+		ret = drv->restore(&vp_dev->vdev);
+
+	/* Finally, tell the device we're all set */
+	if (!ret)
+		vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
+
+	return ret;
+}
+
+static int virtio_pci_restore(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+	struct virtio_driver *drv;
+	int ret;
+
+	drv = container_of(vp_dev->vdev.dev.driver,
+			   struct virtio_driver, driver);
+
+	ret = restore_common(dev);
+	if (!ret && drv && drv->restore)
+		ret = drv->restore(&vp_dev->vdev);
+
+	/* Finally, tell the device we're all set */
+	if (!ret)
+		vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
+
+	return ret;
+}
+
+static const struct dev_pm_ops virtio_pci_pm_ops = {
+	.suspend = virtio_pci_suspend,
+	.resume = virtio_pci_resume,
+	.freeze = virtio_pci_freeze,
+	.thaw = virtio_pci_thaw,
+	.restore = virtio_pci_restore,
+	.poweroff = virtio_pci_suspend,
+};
 #endif
 
 static struct pci_driver virtio_pci_driver = {
@@ -737,8 +836,7 @@ static struct pci_driver virtio_pci_driver = {
 	.probe = virtio_pci_probe,
 	.remove = __devexit_p(virtio_pci_remove),
 #ifdef CONFIG_PM
-	.suspend = virtio_pci_suspend,
-	.resume = virtio_pci_resume,
+	.driver.pm = &virtio_pci_pm_ops,
 #endif
 };
 
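The virtio_ring.c diff below adds the split-kick API from 'virtio: support unlocked queue kick': virtqueue_kick_prepare() must be serialized with virtqueue_add_buf(), but the doorbell write in virtqueue_notify() need not be. A hypothetical driver fragment (lock, flags and token are invented names):

	bool kick;

	spin_lock_irqsave(&priv->vq_lock, flags);
	virtqueue_add_buf(vq, sg, out, in, token, GFP_ATOMIC);
	kick = virtqueue_kick_prepare(vq);	/* serialized half */
	spin_unlock_irqrestore(&priv->vq_lock, flags);

	if (kick)
		virtqueue_notify(vq);		/* slow MMIO write, lock-free */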
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c7a2c208f6ea..79e1b292c030 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -22,23 +22,27 @@
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/hrtimer.h>
 
 /* virtio guest is communicating with a virtual "device" that actually runs on
  * a host processor.  Memory barriers are used to control SMP effects. */
 #ifdef CONFIG_SMP
 /* Where possible, use SMP barriers which are more lightweight than mandatory
  * barriers, because mandatory barriers control MMIO effects on accesses
- * through relaxed memory I/O windows (which virtio does not use). */
-#define virtio_mb() smp_mb()
-#define virtio_rmb() smp_rmb()
-#define virtio_wmb() smp_wmb()
+ * through relaxed memory I/O windows (which virtio-pci does not use). */
+#define virtio_mb(vq) \
+	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
+#define virtio_rmb(vq) \
+	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
+#define virtio_wmb(vq) \
+	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
 #else
 /* We must force memory ordering even if guest is UP since host could be
  * running on another CPU, but SMP barriers are defined to barrier() in that
  * configuration. So fall back to mandatory barriers instead. */
-#define virtio_mb() mb()
-#define virtio_rmb() rmb()
-#define virtio_wmb() wmb()
+#define virtio_mb(vq) mb()
+#define virtio_rmb(vq) rmb()
+#define virtio_wmb(vq) wmb()
 #endif
 
 #ifdef DEBUG
@@ -77,6 +81,9 @@ struct vring_virtqueue
 	/* Actual memory layout for this queue */
 	struct vring vring;
 
+	/* Can we use weak barriers? */
+	bool weak_barriers;
+
 	/* Other side has made a mess, don't try any more. */
 	bool broken;
 
@@ -102,6 +109,10 @@ struct vring_virtqueue
 #ifdef DEBUG
 	/* They're supposed to lock for us. */
 	unsigned int in_use;
+
+	/* Figure out if their kicks are too delayed. */
+	bool last_add_time_valid;
+	ktime_t last_add_time;
 #endif
 
 	/* Tokens for callbacks. */
@@ -160,12 +171,29 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	return head;
 }
 
-int virtqueue_add_buf_gfp(struct virtqueue *_vq,
-			  struct scatterlist sg[],
-			  unsigned int out,
-			  unsigned int in,
-			  void *data,
-			  gfp_t gfp)
+/**
+ * virtqueue_add_buf - expose buffer to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: the description of the buffer(s).
+ * @out_num: the number of sg readable by other side
+ * @in_num: the number of sg which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns remaining capacity of queue or a negative error
+ * (ie. ENOSPC).  Note that it only really makes sense to treat all
+ * positive return values as "available": indirect buffers mean that
+ * we can put an entire sg[] array inside a single queue entry.
+ */
+int virtqueue_add_buf(struct virtqueue *_vq,
+		      struct scatterlist sg[],
+		      unsigned int out,
+		      unsigned int in,
+		      void *data,
+		      gfp_t gfp)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	unsigned int i, avail, uninitialized_var(prev);
@@ -175,6 +203,19 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq,
 
 	BUG_ON(data == NULL);
 
+#ifdef DEBUG
+	{
+		ktime_t now = ktime_get();
+
+		/* No kick or get, with .1 second between?  Warn. */
+		if (vq->last_add_time_valid)
+			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
+					    > 100);
+		vq->last_add_time = now;
+		vq->last_add_time_valid = true;
+	}
+#endif
+
 	/* If the host supports indirect descriptor tables, and we have multiple
 	 * buffers, then go indirect. FIXME: tune this threshold */
 	if (vq->indirect && (out + in) > 1 && vq->num_free) {
@@ -227,40 +268,102 @@ add_head:
 	vq->data[head] = data;
 
 	/* Put entry in available array (but don't update avail->idx until they
-	 * do sync).  FIXME: avoid modulus here? */
-	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
+	 * do sync). */
+	avail = (vq->vring.avail->idx & (vq->vring.num-1));
 	vq->vring.avail->ring[avail] = head;
 
+	/* Descriptors and available array need to be set before we expose the
+	 * new available array entries. */
+	virtio_wmb(vq);
+	vq->vring.avail->idx++;
+	vq->num_added++;
+
+	/* This is very unlikely, but theoretically possible.  Kick
+	 * just in case. */
+	if (unlikely(vq->num_added == (1 << 16) - 1))
+		virtqueue_kick(_vq);
+
 	pr_debug("Added buffer head %i to %p\n", head, vq);
 	END_USE(vq);
 
 	return vq->num_free;
 }
-EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
+EXPORT_SYMBOL_GPL(virtqueue_add_buf);
 
-void virtqueue_kick(struct virtqueue *_vq)
+/**
+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * Instead of virtqueue_kick(), you can do:
+ *	if (virtqueue_kick_prepare(vq))
+ *		virtqueue_notify(vq);
+ *
+ * This is sometimes useful because the virtqueue_kick_prepare() needs
+ * to be serialized, but the actual virtqueue_notify() call does not.
+ */
+bool virtqueue_kick_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	u16 new, old;
+	bool needs_kick;
+
 	START_USE(vq);
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
-	virtio_wmb();
+	virtio_wmb(vq);
 
-	old = vq->vring.avail->idx;
-	new = vq->vring.avail->idx = old + vq->num_added;
+	old = vq->vring.avail->idx - vq->num_added;
+	new = vq->vring.avail->idx;
 	vq->num_added = 0;
 
-	/* Need to update avail index before checking if we should notify */
-	virtio_mb();
-
-	if (vq->event ?
-	    vring_need_event(vring_avail_event(&vq->vring), new, old) :
-	    !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
-		/* Prod other side to tell it about changes. */
-		vq->notify(&vq->vq);
-
+#ifdef DEBUG
+	if (vq->last_add_time_valid) {
+		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
+					      vq->last_add_time)) > 100);
+	}
+	vq->last_add_time_valid = false;
+#endif
+
+	if (vq->event) {
+		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
+					      new, old);
+	} else {
+		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
+	}
 	END_USE(vq);
+	return needs_kick;
+}
+EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
+
+/**
+ * virtqueue_notify - second half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * This does not need to be serialized.
+ */
+void virtqueue_notify(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	/* Prod other side to tell it about changes. */
+	vq->notify(_vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_notify);
+
+/**
+ * virtqueue_kick - update after add_buf
+ * @vq: the struct virtqueue
+ *
+ * After one or more virtqueue_add_buf calls, invoke this to kick
+ * the other side.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+void virtqueue_kick(struct virtqueue *vq)
+{
+	if (virtqueue_kick_prepare(vq))
+		virtqueue_notify(vq);
 }
 EXPORT_SYMBOL_GPL(virtqueue_kick);
 
@@ -294,11 +397,28 @@ static inline bool more_used(const struct vring_virtqueue *vq)
294 return vq->last_used_idx != vq->vring.used->idx; 397 return vq->last_used_idx != vq->vring.used->idx;
295} 398}
296 399
400/**
401 * virtqueue_get_buf - get the next used buffer
402 * @vq: the struct virtqueue we're talking about.
403 * @len: the length written into the buffer
404 *
405 * If the driver wrote data into the buffer, @len will be set to the
406 * amount written. This means you don't need to clear the buffer
407 * beforehand to ensure there's no data leakage in the case of short
408 * writes.
409 *
410 * Caller must ensure we don't call this with other virtqueue
411 * operations at the same time (except where noted).
412 *
413 * Returns the "data" token handed to virtqueue_add_buf(), or NULL
414 * if there are no used buffers.
415 */
297void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) 416void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
298{ 417{
299 struct vring_virtqueue *vq = to_vvq(_vq); 418 struct vring_virtqueue *vq = to_vvq(_vq);
300 void *ret; 419 void *ret;
301 unsigned int i; 420 unsigned int i;
421 u16 last_used;
302 422
303 START_USE(vq); 423 START_USE(vq);
304 424
@@ -314,10 +434,11 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
314 } 434 }
315 435
316 /* Only get used array entries after they have been exposed by host. */ 436 /* Only get used array entries after they have been exposed by host. */
317 virtio_rmb(); 437 virtio_rmb(vq);
318 438
319 i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; 439 last_used = (vq->last_used_idx & (vq->vring.num - 1));
320 *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; 440 i = vq->vring.used->ring[last_used].id;
441 *len = vq->vring.used->ring[last_used].len;
321 442
322 if (unlikely(i >= vq->vring.num)) { 443 if (unlikely(i >= vq->vring.num)) {
323 BAD_RING(vq, "id %u out of range\n", i); 444 BAD_RING(vq, "id %u out of range\n", i);
@@ -337,14 +458,27 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
337 * the read in the next get_buf call. */ 458 * the read in the next get_buf call. */
338 if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { 459 if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
339 vring_used_event(&vq->vring) = vq->last_used_idx; 460 vring_used_event(&vq->vring) = vq->last_used_idx;
340 virtio_mb(); 461 virtio_mb(vq);
341 } 462 }
342 463
464#ifdef DEBUG
465 vq->last_add_time_valid = false;
466#endif
467
343 END_USE(vq); 468 END_USE(vq);
344 return ret; 469 return ret;
345} 470}
346EXPORT_SYMBOL_GPL(virtqueue_get_buf); 471EXPORT_SYMBOL_GPL(virtqueue_get_buf);
347 472
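/*
 * [Editorial sketch, not part of the patch] A typical completion handler
 * drains every used buffer; NULL terminates the loop.  process() is a
 * hypothetical per-driver helper:
 */
static void example_done(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		process(token, len);	/* len = bytes the device wrote */
}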
473/**
474 * virtqueue_disable_cb - disable callbacks
475 * @vq: the struct virtqueue we're talking about.
476 *
477 * Note that this is not necessarily synchronous, hence unreliable and only
478 * useful as an optimization.
479 *
480 * Unlike other operations, this need not be serialized.
481 */
348void virtqueue_disable_cb(struct virtqueue *_vq) 482void virtqueue_disable_cb(struct virtqueue *_vq)
349{ 483{
350 struct vring_virtqueue *vq = to_vvq(_vq); 484 struct vring_virtqueue *vq = to_vvq(_vq);
@@ -353,6 +487,17 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
353} 487}
354EXPORT_SYMBOL_GPL(virtqueue_disable_cb); 488EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
355 489
490/**
491 * virtqueue_enable_cb - restart callbacks after disable_cb.
492 * @vq: the struct virtqueue we're talking about.
493 *
494 * This re-enables callbacks; it returns "false" if there are pending
495 * buffers in the queue, to detect a possible race between the driver
496 * checking for more work and enabling callbacks.
497 *
498 * Caller must ensure we don't call this with other virtqueue
499 * operations at the same time (except where noted).
500 */
356bool virtqueue_enable_cb(struct virtqueue *_vq) 501bool virtqueue_enable_cb(struct virtqueue *_vq)
357{ 502{
358 struct vring_virtqueue *vq = to_vvq(_vq); 503 struct vring_virtqueue *vq = to_vvq(_vq);
@@ -366,7 +511,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
366 * entry. Always do both to keep code simple. */ 511 * entry. Always do both to keep code simple. */
367 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; 512 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
368 vring_used_event(&vq->vring) = vq->last_used_idx; 513 vring_used_event(&vq->vring) = vq->last_used_idx;
369 virtio_mb(); 514 virtio_mb(vq);
370 if (unlikely(more_used(vq))) { 515 if (unlikely(more_used(vq))) {
371 END_USE(vq); 516 END_USE(vq);
372 return false; 517 return false;
@@ -377,6 +522,19 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
377} 522}
378EXPORT_SYMBOL_GPL(virtqueue_enable_cb); 523EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
379 524
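/*
 * [Editorial sketch, not part of the patch] The "false" return closes the
 * race between a final emptiness check and re-enabling interrupts; the
 * usual polling idiom loops until enabling succeeds.  process() is a
 * hypothetical helper:
 */
static void example_poll(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	for (;;) {
		virtqueue_disable_cb(vq);
		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
			process(token, len);
		if (virtqueue_enable_cb(vq))
			break;	/* no race: callbacks are live again */
		/* more buffers slipped in; go around once more */
	}
}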
525/**
526 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
527 * @vq: the struct virtqueue we're talking about.
528 *
529 * This re-enables callbacks but hints to the other side to delay
530 * interrupts until most of the available buffers have been processed;
531 * it returns "false" if there are many pending buffers in the queue,
532 * to detect a possible race between the driver checking for more work
533 * and enabling callbacks.
534 *
535 * Caller must ensure we don't call this with other virtqueue
536 * operations at the same time (except where noted).
537 */
380bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) 538bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
381{ 539{
382 struct vring_virtqueue *vq = to_vvq(_vq); 540 struct vring_virtqueue *vq = to_vvq(_vq);
@@ -393,7 +551,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
393 /* TODO: tune this threshold */ 551 /* TODO: tune this threshold */
394 bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; 552 bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
395 vring_used_event(&vq->vring) = vq->last_used_idx + bufs; 553 vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
396 virtio_mb(); 554 virtio_mb(vq);
397 if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { 555 if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
398 END_USE(vq); 556 END_USE(vq);
399 return false; 557 return false;
@@ -404,6 +562,14 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
404} 562}
405EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); 563EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
406 564
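/*
 * [Editorial sketch, not part of the patch] The delayed variant suits
 * transmit-completion paths, where one interrupt after ~3/4 of the ring
 * is consumed beats one per buffer.  free_token() is hypothetical:
 */
static void example_tx_cleanup(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		free_token(token);

	/* "false" means many completions are already pending: reap again. */
	if (!virtqueue_enable_cb_delayed(vq))
		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
			free_token(token);
}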
565/**
566 * virtqueue_detach_unused_buf - detach first unused buffer
567 * @vq: the struct virtqueue we're talking about.
568 *
569 * Returns NULL or the "data" token handed to virtqueue_add_buf().
570 * This is not valid on an active queue; it is useful only for device
571 * shutdown.
572 */
407void *virtqueue_detach_unused_buf(struct virtqueue *_vq) 573void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
408{ 574{
409 struct vring_virtqueue *vq = to_vvq(_vq); 575 struct vring_virtqueue *vq = to_vvq(_vq);
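/*
 * [Editorial sketch, not part of the patch] Reclaiming never-used buffers
 * belongs in the remove/shutdown path, after the device has been reset so
 * the queue is guaranteed inactive:
 */
static void example_remove(struct virtio_device *vdev, struct virtqueue *vq)
{
	void *token;

	vdev->config->reset(vdev);	/* device stops touching the ring */
	while ((token = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(token);		/* assumes kmalloc'd tokens */
}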
@@ -453,6 +619,7 @@ EXPORT_SYMBOL_GPL(vring_interrupt);
453struct virtqueue *vring_new_virtqueue(unsigned int num, 619struct virtqueue *vring_new_virtqueue(unsigned int num,
454 unsigned int vring_align, 620 unsigned int vring_align,
455 struct virtio_device *vdev, 621 struct virtio_device *vdev,
622 bool weak_barriers,
456 void *pages, 623 void *pages,
457 void (*notify)(struct virtqueue *), 624 void (*notify)(struct virtqueue *),
458 void (*callback)(struct virtqueue *), 625 void (*callback)(struct virtqueue *),
@@ -476,12 +643,14 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
476 vq->vq.vdev = vdev; 643 vq->vq.vdev = vdev;
477 vq->vq.name = name; 644 vq->vq.name = name;
478 vq->notify = notify; 645 vq->notify = notify;
646 vq->weak_barriers = weak_barriers;
479 vq->broken = false; 647 vq->broken = false;
480 vq->last_used_idx = 0; 648 vq->last_used_idx = 0;
481 vq->num_added = 0; 649 vq->num_added = 0;
482 list_add_tail(&vq->vq.list, &vdev->vqs); 650 list_add_tail(&vq->vq.list, &vdev->vqs);
483#ifdef DEBUG 651#ifdef DEBUG
484 vq->in_use = false; 652 vq->in_use = false;
653 vq->last_add_time_valid = false;
485#endif 654#endif
486 655
487 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); 656 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
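/*
 * [Editorial sketch, not part of the patch] Transports now state their
 * barrier requirements when creating a queue.  Modeled on the virtio_pci
 * caller elsewhere in this series; info and vp_notify are that
 * transport's names:
 */
vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
			 true /* weak_barriers: the "device" is software */,
			 info->queue, vp_notify, callback, name);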
@@ -530,7 +699,13 @@ void vring_transport_features(struct virtio_device *vdev)
530} 699}
531EXPORT_SYMBOL_GPL(vring_transport_features); 700EXPORT_SYMBOL_GPL(vring_transport_features);
532 701
533/* return the size of the vring within the virtqueue */ 702/**
703 * virtqueue_get_vring_size - return the size of the virtqueue's vring
704 * @vq: the struct virtqueue containing the vring of interest.
705 *
706 * Returns the size of the vring. This is mainly used for boasting to
707 * userspace. Unlike other operations, this need not be serialized.
708 */
534unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) 709unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
535{ 710{
536 711
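/*
 * [Editorial sketch, not part of the patch] Drivers typically use the
 * ring size to dimension per-entry bookkeeping:
 */
static void *example_alloc_slots(struct virtqueue *vq)
{
	unsigned int num = virtqueue_get_vring_size(vq);

	/* one driver-side slot per ring entry */
	return kcalloc(num, sizeof(void *), GFP_KERNEL);
}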