-rw-r--r--  Documentation/devicetree/bindings/virtio/mmio.txt |  17
-rw-r--r--  drivers/block/virtio_blk.c                        |  30
-rw-r--r--  drivers/char/virtio_console.c                     | 120
-rw-r--r--  drivers/net/virtio_net.c                          |  14
-rw-r--r--  drivers/virtio/Kconfig                            |  11
-rw-r--r--  drivers/virtio/Makefile                           |   1
-rw-r--r--  drivers/virtio/virtio_mmio.c                      | 479
-rw-r--r--  drivers/virtio/virtio_pci.c                       |  10
-rw-r--r--  include/linux/virtio.h                            |   4
-rw-r--r--  include/linux/virtio_config.h                     |   3
-rw-r--r--  include/linux/virtio_mmio.h                       | 111
-rw-r--r--  include/linux/virtio_ring.h                       |   6
12 files changed, 742 insertions(+), 64 deletions(-)
diff --git a/Documentation/devicetree/bindings/virtio/mmio.txt b/Documentation/devicetree/bindings/virtio/mmio.txt
new file mode 100644
index 000000000000..5069c1b8e193
--- /dev/null
+++ b/Documentation/devicetree/bindings/virtio/mmio.txt
@@ -0,0 +1,17 @@
+* virtio memory mapped device
+
+See http://ozlabs.org/~rusty/virtio-spec/ for more details.
+
+Required properties:
+
+- compatible: "virtio,mmio" compatibility string
+- reg: control registers base address and size including configuration space
+- interrupts: interrupt generated by the device
+
+Example:
+
+        virtio_block@3000 {
+                compatible = "virtio,mmio";
+                reg = <0x3000 0x100>;
+                interrupts = <41>;
+        }
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 079c08808d8a..e7a5750a93d9 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -8,10 +8,13 @@
 #include <linux/scatterlist.h>
 #include <linux/string_helpers.h>
 #include <scsi/scsi_cmnd.h>
+#include <linux/idr.h>

 #define PART_BITS 4

-static int major, index;
+static int major;
+static DEFINE_IDA(vd_index_ida);
+
 struct workqueue_struct *virtblk_wq;

 struct virtio_blk
@@ -35,6 +38,9 @@ struct virtio_blk
         /* What host tells us, plus 2 for header & tailer. */
         unsigned int sg_elems;

+        /* Ida index - used to track minor number allocations. */
+        int index;
+
         /* Scatterlist: can be too big for stack. */
         struct scatterlist sg[/*sg_elems*/];
 };
@@ -276,6 +282,11 @@ static int index_to_minor(int index)
         return index << PART_BITS;
 }

+static int minor_to_index(int minor)
+{
+        return minor >> PART_BITS;
+}
+
 static ssize_t virtblk_serial_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
@@ -341,14 +352,17 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 {
         struct virtio_blk *vblk;
         struct request_queue *q;
-        int err;
+        int err, index;
         u64 cap;
         u32 v, blk_size, sg_elems, opt_io_size;
         u16 min_io_size;
         u8 physical_block_exp, alignment_offset;

-        if (index_to_minor(index) >= 1 << MINORBITS)
-                return -ENOSPC;
+        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
+                             GFP_KERNEL);
+        if (err < 0)
+                goto out;
+        index = err;

         /* We need to know how many segments before we allocate. */
         err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
@@ -365,7 +379,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
                        sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
         if (!vblk) {
                 err = -ENOMEM;
-                goto out;
+                goto out_free_index;
         }

         INIT_LIST_HEAD(&vblk->reqs);
@@ -421,7 +435,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
         vblk->disk->private_data = vblk;
         vblk->disk->fops = &virtblk_fops;
         vblk->disk->driverfs_dev = &vdev->dev;
-        index++;
+        vblk->index = index;

         /* configure queue flush support */
         if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
@@ -516,6 +530,8 @@ out_free_vq:
         vdev->config->del_vqs(vdev);
 out_free_vblk:
         kfree(vblk);
+out_free_index:
+        ida_simple_remove(&vd_index_ida, index);
 out:
         return err;
 }
@@ -523,6 +539,7 @@ out:
 static void __devexit virtblk_remove(struct virtio_device *vdev)
 {
         struct virtio_blk *vblk = vdev->priv;
+        int index = vblk->index;

         flush_work(&vblk->config_work);

@@ -538,6 +555,7 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
         mempool_destroy(vblk->pool);
         vdev->config->del_vqs(vdev);
         kfree(vblk);
+        ida_simple_remove(&vd_index_ida, index);
 }

 static const struct virtio_device_id id_table[] = {
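
For reference, the ida_simple_get()/ida_simple_remove() pair used in the virtio_blk hunks above follows the usual IDA pattern for handing out reusable per-device indices. A minimal sketch (the example_* names and the [0, 16) range are illustrative, not from the patch):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Allocate the lowest free index in [0, 16); returns the index or -errno. */
static int example_get_index(void)
{
        return ida_simple_get(&example_ida, 0, 16, GFP_KERNEL);
}

/* Release the index so a later hot-plug can reuse it. */
static void example_put_index(int index)
{
        ida_simple_remove(&example_ida, index);
}

Unlike the old global index++ counter, indices freed in virtblk_remove() become available again to subsequent probes.
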
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fb68b1295373..4ca181f1378b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -19,8 +19,10 @@
  */
 #include <linux/cdev.h>
 #include <linux/debugfs.h>
+#include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/err.h>
+#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/list.h>
@@ -73,6 +75,7 @@ struct ports_driver_data {
 static struct ports_driver_data pdrvdata;

 DEFINE_SPINLOCK(pdrvdata_lock);
+DECLARE_COMPLETION(early_console_added);

 /* This struct holds information that's relevant only for console ports */
 struct console {
@@ -151,6 +154,10 @@ struct ports_device {
         int chr_major;
 };

+struct port_stats {
+        unsigned long bytes_sent, bytes_received, bytes_discarded;
+};
+
 /* This struct holds the per-port data */
 struct port {
         /* Next port in the list, head is in the ports_device */
@@ -179,6 +186,13 @@ struct port {
         struct dentry *debugfs_file;

         /*
+         * Keep count of the bytes sent, received and discarded for
+         * this port for accounting and debugging purposes. These
+         * counts are not reset across port open / close events.
+         */
+        struct port_stats stats;
+
+        /*
          * The entries in this struct will be valid if this port is
          * hooked up to an hvc console
          */
@@ -347,17 +361,19 @@ fail:
 }

 /* Callers should take appropriate locks */
-static void *get_inbuf(struct port *port)
+static struct port_buffer *get_inbuf(struct port *port)
 {
         struct port_buffer *buf;
-        struct virtqueue *vq;
         unsigned int len;

-        vq = port->in_vq;
-        buf = virtqueue_get_buf(vq, &len);
+        if (port->inbuf)
+                return port->inbuf;
+
+        buf = virtqueue_get_buf(port->in_vq, &len);
         if (buf) {
                 buf->len = len;
                 buf->offset = 0;
+                port->stats.bytes_received += len;
         }
         return buf;
 }
@@ -384,32 +400,27 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 static void discard_port_data(struct port *port)
 {
         struct port_buffer *buf;
-        struct virtqueue *vq;
-        unsigned int len;
-        int ret;
+        unsigned int err;

         if (!port->portdev) {
                 /* Device has been unplugged. vqs are already gone. */
                 return;
         }
-        vq = port->in_vq;
-        if (port->inbuf)
-                buf = port->inbuf;
-        else
-                buf = virtqueue_get_buf(vq, &len);
+        buf = get_inbuf(port);

-        ret = 0;
+        err = 0;
         while (buf) {
-                if (add_inbuf(vq, buf) < 0) {
-                        ret++;
+                port->stats.bytes_discarded += buf->len - buf->offset;
+                if (add_inbuf(port->in_vq, buf) < 0) {
+                        err++;
                         free_buf(buf);
                 }
-                buf = virtqueue_get_buf(vq, &len);
+                port->inbuf = NULL;
+                buf = get_inbuf(port);
         }
-        port->inbuf = NULL;
-        if (ret)
+        if (err)
                 dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
-                         ret);
+                         err);
 }

 static bool port_has_data(struct port *port)
@@ -417,18 +428,12 @@ static bool port_has_data(struct port *port)
         unsigned long flags;
         bool ret;

+        ret = false;
         spin_lock_irqsave(&port->inbuf_lock, flags);
-        if (port->inbuf) {
-                ret = true;
-                goto out;
-        }
         port->inbuf = get_inbuf(port);
-        if (port->inbuf) {
+        if (port->inbuf)
                 ret = true;
-                goto out;
-        }
-        ret = false;
-out:
+
         spin_unlock_irqrestore(&port->inbuf_lock, flags);
         return ret;
 }
@@ -529,6 +534,8 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
                 cpu_relax();
 done:
         spin_unlock_irqrestore(&port->outvq_lock, flags);
+
+        port->stats.bytes_sent += in_count;
         /*
          * We're expected to return the amount of data we wrote -- all
          * of it
@@ -633,8 +640,8 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
                 if (filp->f_flags & O_NONBLOCK)
                         return -EAGAIN;

-                ret = wait_event_interruptible(port->waitqueue,
-                                               !will_read_block(port));
+                ret = wait_event_freezable(port->waitqueue,
+                                           !will_read_block(port));
                 if (ret < 0)
                         return ret;
         }
@@ -677,8 +684,8 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
                 if (nonblock)
                         return -EAGAIN;

-                ret = wait_event_interruptible(port->waitqueue,
-                                               !will_write_block(port));
+                ret = wait_event_freezable(port->waitqueue,
+                                           !will_write_block(port));
                 if (ret < 0)
                         return ret;
         }
@@ -1059,6 +1066,14 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
         out_offset += snprintf(buf + out_offset, out_count - out_offset,
                                "outvq_full: %d\n", port->outvq_full);
         out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                               "bytes_sent: %lu\n", port->stats.bytes_sent);
+        out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                               "bytes_received: %lu\n",
+                               port->stats.bytes_received);
+        out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                               "bytes_discarded: %lu\n",
+                               port->stats.bytes_discarded);
+        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                                "is_console: %s\n",
                                is_console_port(port) ? "yes" : "no");
         out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -1143,6 +1158,7 @@ static int add_port(struct ports_device *portdev, u32 id)
         port->cons.ws.ws_row = port->cons.ws.ws_col = 0;

         port->host_connected = port->guest_connected = false;
+        port->stats = (struct port_stats) { 0 };

         port->outvq_full = false;

@@ -1352,6 +1368,7 @@ static void handle_control_message(struct ports_device *portdev,
                         break;

                 init_port_console(port);
+                complete(&early_console_added);
                 /*
                  * Could remove the port here in case init fails - but
                  * have to notify the host first.
@@ -1394,6 +1411,13 @@ static void handle_control_message(struct ports_device *portdev,
                 break;
         case VIRTIO_CONSOLE_PORT_NAME:
                 /*
+                 * If we woke up after hibernation, we can get this
+                 * again. Skip it in that case.
+                 */
+                if (port->name)
+                        break;
+
+                /*
                  * Skip the size of the header and the cpkt to get the size
                  * of the name that was sent
                  */
@@ -1481,8 +1505,7 @@ static void in_intr(struct virtqueue *vq)
                 return;

         spin_lock_irqsave(&port->inbuf_lock, flags);
-        if (!port->inbuf)
-                port->inbuf = get_inbuf(port);
+        port->inbuf = get_inbuf(port);

         /*
          * Don't queue up data when port is closed. This condition
@@ -1563,7 +1586,7 @@ static int init_vqs(struct ports_device *portdev)
         portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
                                    GFP_KERNEL);
         if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
-                        !portdev->out_vqs) {
+            !portdev->out_vqs) {
                 err = -ENOMEM;
                 goto free;
         }
@@ -1648,6 +1671,10 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
         struct ports_device *portdev;
         int err;
         bool multiport;
+        bool early = early_put_chars != NULL;
+
+        /* Ensure to read early_put_chars now */
+        barrier();

         portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
         if (!portdev) {
@@ -1675,13 +1702,11 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)

         multiport = false;
         portdev->config.max_nr_ports = 1;
-        if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
+        if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+                              offsetof(struct virtio_console_config,
+                                       max_nr_ports),
+                              &portdev->config.max_nr_ports) == 0)
                 multiport = true;
-                vdev->config->get(vdev, offsetof(struct virtio_console_config,
-                                                 max_nr_ports),
-                                  &portdev->config.max_nr_ports,
-                                  sizeof(portdev->config.max_nr_ports));
-        }

         err = init_vqs(portdev);
         if (err < 0) {
@@ -1719,6 +1744,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)

         __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
                            VIRTIO_CONSOLE_DEVICE_READY, 1);
+
+        /*
+         * If there was an early virtio console, assume that there are no
+         * other consoles. We need to wait until the hvc_alloc matches the
+         * hvc_instantiate, otherwise tty_open will complain, resulting in
+         * a "Warning: unable to open an initial console" boot failure.
+         * Without multiport this is done in add_port above. With multiport
+         * this might take some host<->guest communication - thus we have to
+         * wait.
+         */
+        if (multiport && early)
+                wait_for_completion(&early_console_added);
+
         return 0;

 free_vqs:
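
The early_console_added completion used in the hunks above is a plain completion handshake: the control-message handler signals it once the host has set up the console port, and a probe that registered an early console blocks on it so the hvc_alloc() has happened before the probe returns. A stripped-down sketch of that handshake (example_* names are illustrative only):

#include <linux/completion.h>

static DECLARE_COMPLETION(example_console_added);

/* Control-queue path: the host just told us about a console port. */
static void example_console_port_ready(void)
{
        complete(&example_console_added);
}

/* Probe path: only wait when an early console was registered and the
 * device is multiport, i.e. when the console port arrives asynchronously. */
static void example_wait_for_console(bool multiport, bool early)
{
        if (multiport && early)
                wait_for_completion(&example_console_added);
}
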
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 91039ab16728..6ee8410443c4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -925,12 +925,10 @@ static void virtnet_update_status(struct virtnet_info *vi)
 {
         u16 v;

-        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
-                return;
-
-        vi->vdev->config->get(vi->vdev,
+        if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
                               offsetof(struct virtio_net_config, status),
-                              &v, sizeof(v));
+                              &v) < 0)
+                return;

         /* Ignore unknown (future) status bits */
         v &= VIRTIO_NET_S_LINK_UP;
@@ -1006,11 +1004,9 @@ static int virtnet_probe(struct virtio_device *vdev)
         }

         /* Configuration may specify what MAC to use. Otherwise random. */
-        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
-                vdev->config->get(vdev,
+        if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
                                   offsetof(struct virtio_net_config, mac),
-                                  dev->dev_addr, dev->addr_len);
-        } else
+                                  dev->dev_addr, dev->addr_len) < 0)
                 random_ether_addr(dev->dev_addr);

         /* Set up our device-specific information */
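
Both virtio_net hunks rely on the virtio_config_val()/virtio_config_val_len() helpers (see the include/linux/virtio_config.h hunk later in this diff): judging by the callers, they return a negative value when the feature bit was not negotiated and 0 once the config field has been read. A sketch of the resulting pattern (the function name is made up for illustration):

#include <linux/etherdevice.h>
#include <linux/virtio_config.h>
#include <linux/virtio_net.h>

static void example_read_net_config(struct virtio_device *vdev, u8 *mac)
{
        u16 status;

        /* One call replaces virtio_has_feature() + config->get(). */
        if (virtio_config_val(vdev, VIRTIO_NET_F_STATUS,
                              offsetof(struct virtio_net_config, status),
                              &status) < 0)
                return;                 /* feature not offered */

        /* Same idea, with an explicit length for multi-byte fields. */
        if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
                                  offsetof(struct virtio_net_config, mac),
                                  mac, ETH_ALEN) < 0)
                random_ether_addr(mac);
}
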
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 57e493b1bd20..816ed08e7cf3 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -35,4 +35,15 @@ config VIRTIO_BALLOON

          If unsure, say M.

+config VIRTIO_MMIO
+        tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
+        depends on EXPERIMENTAL
+        select VIRTIO
+        select VIRTIO_RING
+        ---help---
+         This driver provides support for memory mapped virtio
+         platform devices.
+
+         If unsure, say N.
+
 endmenu
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 6738c446c199..5a4c63cfd380 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_VIRTIO) += virtio.o
 obj-$(CONFIG_VIRTIO_RING) += virtio_ring.o
+obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
 obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
new file mode 100644
index 000000000000..acc5e43c373e
--- /dev/null
+++ b/drivers/virtio/virtio_mmio.c
@@ -0,0 +1,479 @@
+/*
+ * Virtio memory mapped device driver
+ *
+ * Copyright 2011, ARM Ltd.
+ *
+ * This module allows virtio devices to be used over a virtual, memory mapped
+ * platform device.
+ *
+ * Registers layout (all 32-bit wide):
+ *
+ * offset d. name             description
+ * ------ -- ---------------- -----------------
+ *
+ * 0x000  R  MagicValue       Magic value "virt"
+ * 0x004  R  Version          Device version (current max. 1)
+ * 0x008  R  DeviceID         Virtio device ID
+ * 0x00c  R  VendorID         Virtio vendor ID
+ *
+ * 0x010  R  HostFeatures     Features supported by the host
+ * 0x014  W  HostFeaturesSel  Set of host features to access via HostFeatures
+ *
+ * 0x020  W  GuestFeatures    Features activated by the guest
+ * 0x024  W  GuestFeaturesSel Set of activated features to set via GuestFeatures
+ * 0x028  W  GuestPageSize    Size of guest's memory page in bytes
+ *
+ * 0x030  W  QueueSel         Queue selector
+ * 0x034  R  QueueNumMax      Maximum size of the currently selected queue
+ * 0x038  W  QueueNum         Queue size for the currently selected queue
+ * 0x03c  W  QueueAlign       Used Ring alignment for the current queue
+ * 0x040  RW QueuePFN         PFN for the currently selected queue
+ *
+ * 0x050  W  QueueNotify      Queue notifier
+ * 0x060  R  InterruptStatus  Interrupt status register
+ * 0x064  W  InterruptACK     Interrupt acknowledge register
+ * 0x070  RW Status           Device status register
+ *
+ * 0x100+ RW                  Device-specific configuration space
+ *
+ * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_mmio.h>
+#include <linux/virtio_ring.h>
+
+
+
+/* The alignment to use between consumer and producer parts of vring.
+ * Currently hardcoded to the page size. */
+#define VIRTIO_MMIO_VRING_ALIGN         PAGE_SIZE
+
+
+
+#define to_virtio_mmio_device(_plat_dev) \
+        container_of(_plat_dev, struct virtio_mmio_device, vdev)
+
+struct virtio_mmio_device {
+        struct virtio_device vdev;
+        struct platform_device *pdev;
+
+        void __iomem *base;
+        unsigned long version;
+
+        /* a list of queues so we can dispatch IRQs */
+        spinlock_t lock;
+        struct list_head virtqueues;
+};
+
+struct virtio_mmio_vq_info {
+        /* the actual virtqueue */
+        struct virtqueue *vq;
+
+        /* the number of entries in the queue */
+        unsigned int num;
+
+        /* the index of the queue */
+        int queue_index;
+
+        /* the virtual address of the ring queue */
+        void *queue;
+
+        /* the list node for the virtqueues list */
+        struct list_head node;
+};
+
+
+
+/* Configuration interface */
+
+static u32 vm_get_features(struct virtio_device *vdev)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+        /* TODO: Features > 32 bits */
+        writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL);
+
+        return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES);
+}
+
+static void vm_finalize_features(struct virtio_device *vdev)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+        int i;
+
+        /* Give virtio_ring a chance to accept features. */
+        vring_transport_features(vdev);
+
+        for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
+                writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET);
+                writel(vdev->features[i],
+                                vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
+        }
+}
+
+static void vm_get(struct virtio_device *vdev, unsigned offset,
+                   void *buf, unsigned len)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+        u8 *ptr = buf;
+        int i;
+
+        for (i = 0; i < len; i++)
+                ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+}
+
+static void vm_set(struct virtio_device *vdev, unsigned offset,
+                   const void *buf, unsigned len)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+        const u8 *ptr = buf;
+        int i;
+
+        for (i = 0; i < len; i++)
+                writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+}
+
+static u8 vm_get_status(struct virtio_device *vdev)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+        return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
+}
+
+static void vm_set_status(struct virtio_device *vdev, u8 status)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+        /* We should never be setting status to 0. */
+        BUG_ON(status == 0);
+
+        writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
+}
+
+static void vm_reset(struct virtio_device *vdev)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+        /* 0 status means a reset. */
+        writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
+}
+
+
+
+/* Transport interface */
+
+/* the notify function used when creating a virt queue */
+static void vm_notify(struct virtqueue *vq)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
+        struct virtio_mmio_vq_info *info = vq->priv;
+
+        /* We write the queue's selector into the notification register to
+         * signal the other end */
+        writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+}
+
+/* Notify all virtqueues on an interrupt. */
+static irqreturn_t vm_interrupt(int irq, void *opaque)
+{
+        struct virtio_mmio_device *vm_dev = opaque;
+        struct virtio_mmio_vq_info *info;
+        struct virtio_driver *vdrv = container_of(vm_dev->vdev.dev.driver,
+                        struct virtio_driver, driver);
+        unsigned long status;
+        unsigned long flags;
+        irqreturn_t ret = IRQ_NONE;
+
+        /* Read and acknowledge interrupts */
+        status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
+        writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
+
+        if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)
+                        && vdrv && vdrv->config_changed) {
+                vdrv->config_changed(&vm_dev->vdev);
+                ret = IRQ_HANDLED;
+        }
+
+        if (likely(status & VIRTIO_MMIO_INT_VRING)) {
+                spin_lock_irqsave(&vm_dev->lock, flags);
+                list_for_each_entry(info, &vm_dev->virtqueues, node)
+                        ret |= vring_interrupt(irq, info->vq);
+                spin_unlock_irqrestore(&vm_dev->lock, flags);
+        }
+
+        return ret;
+}
+
+
+
+static void vm_del_vq(struct virtqueue *vq)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
+        struct virtio_mmio_vq_info *info = vq->priv;
+        unsigned long flags, size;
+
+        spin_lock_irqsave(&vm_dev->lock, flags);
+        list_del(&info->node);
+        spin_unlock_irqrestore(&vm_dev->lock, flags);
+
+        vring_del_virtqueue(vq);
+
+        /* Select and deactivate the queue */
+        writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
+        writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+
+        size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
+        free_pages_exact(info->queue, size);
+        kfree(info);
+}
+
+static void vm_del_vqs(struct virtio_device *vdev)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+        struct virtqueue *vq, *n;
+
+        list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+                vm_del_vq(vq);
+
+        free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
+}
+
+
+
+static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
+                                  void (*callback)(struct virtqueue *vq),
+                                  const char *name)
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+        struct virtio_mmio_vq_info *info;
+        struct virtqueue *vq;
+        unsigned long flags, size;
+        int err;
+
+        /* Select the queue we're interested in */
+        writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
+
+        /* Queue shouldn't already be set up. */
+        if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) {
+                err = -ENOENT;
+                goto error_available;
+        }
+
+        /* Allocate and fill out our active queue description */
+        info = kmalloc(sizeof(*info), GFP_KERNEL);
+        if (!info) {
+                err = -ENOMEM;
+                goto error_kmalloc;
+        }
+        info->queue_index = index;
+
+        /* Allocate pages for the queue - start with a queue as big as
+         * possible (limited by maximum size allowed by device), drop down
+         * to a minimal size, just big enough to fit descriptor table
+         * and two rings (which makes it "alignment_size * 2")
+         */
+        info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
+        while (1) {
+                size = PAGE_ALIGN(vring_size(info->num,
+                                VIRTIO_MMIO_VRING_ALIGN));
+                /* Already smallest possible allocation? */
+                if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) {
+                        err = -ENOMEM;
+                        goto error_alloc_pages;
+                }
+
+                info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+                if (info->queue)
+                        break;
+
+                info->num /= 2;
+        }
+
+        /* Activate the queue */
+        writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
+        writel(VIRTIO_MMIO_VRING_ALIGN,
+                        vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
+        writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
+                        vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+
+        /* Create the vring */
+        vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN,
+                        vdev, info->queue, vm_notify, callback, name);
+        if (!vq) {
+                err = -ENOMEM;
+                goto error_new_virtqueue;
+        }
+
+        vq->priv = info;
+        info->vq = vq;
+
+        spin_lock_irqsave(&vm_dev->lock, flags);
+        list_add(&info->node, &vm_dev->virtqueues);
+        spin_unlock_irqrestore(&vm_dev->lock, flags);
+
+        return vq;
+
+error_new_virtqueue:
+        writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+        free_pages_exact(info->queue, size);
+error_alloc_pages:
+        kfree(info);
+error_kmalloc:
+error_available:
+        return ERR_PTR(err);
+}
+
+static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+                       struct virtqueue *vqs[],
+                       vq_callback_t *callbacks[],
+                       const char *names[])
+{
+        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+        unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
+        int i, err;
+
+        err = request_irq(irq, vm_interrupt, IRQF_SHARED,
+                        dev_name(&vdev->dev), vm_dev);
+        if (err)
+                return err;
+
+        for (i = 0; i < nvqs; ++i) {
+                vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
+                if (IS_ERR(vqs[i])) {
+                        vm_del_vqs(vdev);
+                        return PTR_ERR(vqs[i]);
+                }
+        }
+
+        return 0;
+}
+
+
+
+static struct virtio_config_ops virtio_mmio_config_ops = {
+        .get            = vm_get,
+        .set            = vm_set,
+        .get_status     = vm_get_status,
+        .set_status     = vm_set_status,
+        .reset          = vm_reset,
+        .find_vqs       = vm_find_vqs,
+        .del_vqs        = vm_del_vqs,
+        .get_features   = vm_get_features,
+        .finalize_features = vm_finalize_features,
+};
+
+
+
+/* Platform device */
+
+static int __devinit virtio_mmio_probe(struct platform_device *pdev)
+{
+        struct virtio_mmio_device *vm_dev;
+        struct resource *mem;
+        unsigned long magic;
+
+        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+        if (!mem)
+                return -EINVAL;
+
+        if (!devm_request_mem_region(&pdev->dev, mem->start,
+                        resource_size(mem), pdev->name))
+                return -EBUSY;
+
+        vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
+        if (!vm_dev)
+                return -ENOMEM;
+
+        vm_dev->vdev.dev.parent = &pdev->dev;
+        vm_dev->vdev.config = &virtio_mmio_config_ops;
+        vm_dev->pdev = pdev;
+        INIT_LIST_HEAD(&vm_dev->virtqueues);
+        spin_lock_init(&vm_dev->lock);
+
+        vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+        if (vm_dev->base == NULL)
+                return -EFAULT;
+
+        /* Check magic value */
+        magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
+        if (memcmp(&magic, "virt", 4) != 0) {
+                dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
+                return -ENODEV;
+        }
+
+        /* Check device version */
+        vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
+        if (vm_dev->version != 1) {
+                dev_err(&pdev->dev, "Version %ld not supported!\n",
+                                vm_dev->version);
+                return -ENXIO;
+        }
+
+        vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
+        vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
+
+        writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
+        platform_set_drvdata(pdev, vm_dev);
+
+        return register_virtio_device(&vm_dev->vdev);
+}
+
+static int __devexit virtio_mmio_remove(struct platform_device *pdev)
+{
+        struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
+
+        unregister_virtio_device(&vm_dev->vdev);
+
+        return 0;
+}
+
+
+
+/* Platform driver */
+
+static struct of_device_id virtio_mmio_match[] = {
+        { .compatible = "virtio,mmio", },
+        {},
+};
+MODULE_DEVICE_TABLE(of, virtio_mmio_match);
+
+static struct platform_driver virtio_mmio_driver = {
+        .probe          = virtio_mmio_probe,
+        .remove         = __devexit_p(virtio_mmio_remove),
+        .driver         = {
+                .name   = "virtio-mmio",
+                .owner  = THIS_MODULE,
+                .of_match_table = virtio_mmio_match,
+        },
+};
+
+static int __init virtio_mmio_init(void)
+{
+        return platform_driver_register(&virtio_mmio_driver);
+}
+
+static void __exit virtio_mmio_exit(void)
+{
+        platform_driver_unregister(&virtio_mmio_driver);
+}
+
+module_init(virtio_mmio_init);
+module_exit(virtio_mmio_exit);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
+MODULE_LICENSE("GPL");
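
The driver above binds either through the "virtio,mmio" device tree match or, since the platform bus also matches by name, through a plain platform device called "virtio-mmio". A hypothetical board-file registration could look like the sketch below; the base address, region size and interrupt number are placeholders, not values from the patch:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource example_virtio_mmio_resources[] = {
        {
                .start  = 0x10013000,   /* placeholder: control registers + config space */
                .end    = 0x10013000 + 0x1ff,
                .flags  = IORESOURCE_MEM,
        },
        {
                .start  = 41,           /* placeholder interrupt line */
                .end    = 41,
                .flags  = IORESOURCE_IRQ,
        },
};

static struct platform_device example_virtio_mmio_device = {
        .name           = "virtio-mmio",
        .id             = -1,
        .resource       = example_virtio_mmio_resources,
        .num_resources  = ARRAY_SIZE(example_virtio_mmio_resources),
};

/* Called from board init code: */
static int __init example_virtio_mmio_register(void)
{
        return platform_device_register(&example_virtio_mmio_device);
}
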
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 4bcc8b82640b..79a31e5b4b68 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -415,9 +415,13 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
                 }
         }

-        spin_lock_irqsave(&vp_dev->lock, flags);
-        list_add(&info->node, &vp_dev->virtqueues);
-        spin_unlock_irqrestore(&vp_dev->lock, flags);
+        if (callback) {
+                spin_lock_irqsave(&vp_dev->lock, flags);
+                list_add(&info->node, &vp_dev->virtqueues);
+                spin_unlock_irqrestore(&vp_dev->lock, flags);
+        } else {
+                INIT_LIST_HEAD(&info->node);
+        }

         return vq;

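
The virtio_pci hunk keeps callback-less virtqueues off vp_dev->virtqueues so the shared interrupt handler never walks them; INIT_LIST_HEAD() leaves the node pointing at itself, which is presumably what keeps the unconditional list_del() in the teardown path harmless. A compressed sketch of that assumption (names are illustrative):

#include <linux/list.h>

struct example_vq_info {
        struct list_head node;
};

static void example_track(struct example_vq_info *info, struct list_head *all,
                          bool has_callback)
{
        if (has_callback)
                list_add(&info->node, all);     /* visible to the IRQ handler */
        else
                INIT_LIST_HEAD(&info->node);    /* self-linked: later list_del() is harmless */
}

static void example_untrack(struct example_vq_info *info)
{
        /* Safe whether or not the node was ever added to the shared list. */
        list_del(&info->node);
}
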
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 851ebf1a4476..4c069d8bd740 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -131,10 +131,10 @@ void unregister_virtio_device(struct virtio_device *dev);
  * virtio_driver - operations for a virtio I/O driver
  * @driver: underlying device driver (populate name and owner).
  * @id_table: the ids serviced by this driver.
- * @feature_table: an array of feature numbers supported by this device.
+ * @feature_table: an array of feature numbers supported by this driver.
  * @feature_table_size: number of entries in the feature table array.
  * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function when a device is removed.
+ * @remove: the function to call when a device is removed.
  * @config_changed: optional function to call when the device configuration
  *    changes; may be called in interrupt context.
  */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 39c88c5ad19d..add4790b21fe 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -155,6 +155,9 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
 #define virtio_config_val(vdev, fbit, offset, v) \
         virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v))

+#define virtio_config_val_len(vdev, fbit, offset, v, len) \
+        virtio_config_buf((vdev), (fbit), (offset), (v), (len))
+
 static inline int virtio_config_buf(struct virtio_device *vdev,
                                     unsigned int fbit,
                                     unsigned int offset,
diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h
new file mode 100644
index 000000000000..27c7edefbc86
--- /dev/null
+++ b/include/linux/virtio_mmio.h
@@ -0,0 +1,111 @@
+/*
+ * Virtio platform device driver
+ *
+ * Copyright 2011, ARM Ltd.
+ *
+ * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_MMIO_H
+#define _LINUX_VIRTIO_MMIO_H
+
+/*
+ * Control registers
+ */
+
+/* Magic value ("virt" string) - Read Only */
+#define VIRTIO_MMIO_MAGIC_VALUE         0x000
+
+/* Virtio device version - Read Only */
+#define VIRTIO_MMIO_VERSION             0x004
+
+/* Virtio device ID - Read Only */
+#define VIRTIO_MMIO_DEVICE_ID           0x008
+
+/* Virtio vendor ID - Read Only */
+#define VIRTIO_MMIO_VENDOR_ID           0x00c
+
+/* Bitmask of the features supported by the host
+ * (32 bits per set) - Read Only */
+#define VIRTIO_MMIO_HOST_FEATURES       0x010
+
+/* Host features set selector - Write Only */
+#define VIRTIO_MMIO_HOST_FEATURES_SEL   0x014
+
+/* Bitmask of features activated by the guest
+ * (32 bits per set) - Write Only */
+#define VIRTIO_MMIO_GUEST_FEATURES      0x020
+
+/* Activated features set selector - Write Only */
+#define VIRTIO_MMIO_GUEST_FEATURES_SET  0x024
+
+/* Guest's memory page size in bytes - Write Only */
+#define VIRTIO_MMIO_GUEST_PAGE_SIZE     0x028
+
+/* Queue selector - Write Only */
+#define VIRTIO_MMIO_QUEUE_SEL           0x030
+
+/* Maximum size of the currently selected queue - Read Only */
+#define VIRTIO_MMIO_QUEUE_NUM_MAX       0x034
+
+/* Queue size for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_NUM           0x038
+
+/* Used Ring alignment for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_ALIGN         0x03c
+
+/* Guest's PFN for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_PFN           0x040
+
+/* Queue notifier - Write Only */
+#define VIRTIO_MMIO_QUEUE_NOTIFY        0x050
+
+/* Interrupt status - Read Only */
+#define VIRTIO_MMIO_INTERRUPT_STATUS    0x060
+
+/* Interrupt acknowledge - Write Only */
+#define VIRTIO_MMIO_INTERRUPT_ACK       0x064
+
+/* Device status register - Read Write */
+#define VIRTIO_MMIO_STATUS              0x070
+
+/* The config space is defined by each driver as
+ * the per-driver configuration space - Read Write */
+#define VIRTIO_MMIO_CONFIG              0x100
+
+
+
+/*
+ * Interrupt flags (re: interrupt status & acknowledge registers)
+ */
+
+#define VIRTIO_MMIO_INT_VRING           (1 << 0)
+#define VIRTIO_MMIO_INT_CONFIG          (1 << 1)
+
+#endif
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 4a32cb6da425..36be0f6e18a9 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -135,13 +135,13 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
         vr->num = num;
         vr->desc = p;
         vr->avail = p + num*sizeof(struct vring_desc);
-        vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1)
-                            & ~(align - 1));
+        vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16)
+                + align-1) & ~(align - 1));
 }

 static inline unsigned vring_size(unsigned int num, unsigned long align)
 {
-        return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num)
+        return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
                  + align - 1) & ~(align - 1))
                 + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
 }
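
Both virtio_ring.h changes account for the same extra __u16: with the event-index extension described in the virtio spec, the available ring carries a trailing used_event field (its used-ring counterpart, avail_event, was already covered by the existing sizeof(__u16) * 3 term). A sketch of the layout being sized:

/*
 * First, "align"-aligned part: descriptor table plus available ring.
 *
 *      struct vring_desc desc[num];
 *      __u16 avail_flags;
 *      __u16 avail_idx;
 *      __u16 available[num];
 *      __u16 used_event_idx;           <- the newly counted (3 + num)th __u16
 *
 * Second part, starting at the next "align" boundary: used ring.
 *
 *      __u16 used_flags;
 *      __u16 used_idx;
 *      struct vring_used_elem used[num];
 *      __u16 avail_event_idx;          <- part of the existing __u16 * 3 term
 */
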