author		Pawel Moll <pawel.moll@arm.com>	2011-10-24 09:07:03 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2011-11-01 21:11:01 -0400
commit		edfd52e6367270c90f3fd7cc302b375ffa89f91e (patch)
tree		1a847022a6ee87524461ecd2427e7f23acab48ce /drivers
parent		005b20a8e0f587a46a00910ba4507bb9f6da70ea (diff)
virtio: Add platform bus driver for memory mapped virtio device
This patch, based on the virtio PCI driver, adds support for memory mapped (platform) virtio devices. This should allow environments like qemu to use virtio-based block & network devices even on platforms without PCI support.

One can define and register a platform device whose resources describe the memory mapped control registers and the "mailbox" interrupt. Such a device can also be instantiated from a Device Tree node with a compatible property equal to "virtio,mmio".

Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Pawel Moll <pawel.moll@arm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
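For example, a board or machine setup file could register such a device along the following lines. This is a minimal sketch for illustration only: the base address 0x10013000, the 0x200-byte register window and IRQ 42 are hypothetical placeholders, not values taken from this patch.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Hypothetical register base and interrupt number -- use whatever the
 * emulated platform actually provides. The region covers the control
 * registers (0x000-0x0ff) and the device-specific config space (0x100+). */
static struct resource virtio_mmio_resources[] = {
	{
		.start	= 0x10013000,
		.end	= 0x100131ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device virtio_mmio_device = {
	/* Must match the platform driver name ("virtio-mmio") */
	.name		= "virtio-mmio",
	.id		= -1,
	.resource	= virtio_mmio_resources,
	.num_resources	= ARRAY_SIZE(virtio_mmio_resources),
};

static int __init board_virtio_init(void)
{
	return platform_device_register(&virtio_mmio_device);
}
device_initcall(board_virtio_init);

The "virtio-mmio" name matches the platform driver registered by this patch, so its probe routine binds to the device and the regular virtio block/net drivers then work on top of it unchanged.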
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/virtio/Kconfig		11
-rw-r--r--	drivers/virtio/Makefile		1
-rw-r--r--	drivers/virtio/virtio_mmio.c	479
3 files changed, 491 insertions, 0 deletions
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 57e493b1bd20..816ed08e7cf3 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -35,4 +35,15 @@ config VIRTIO_BALLOON
 
 	  If unsure, say M.
 
+config VIRTIO_MMIO
+	tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	select VIRTIO
+	select VIRTIO_RING
+	---help---
+	 This driver provides support for memory mapped virtio
+	 platform devices.
+
+	 If unsure, say N.
+
 endmenu
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 6738c446c199..5a4c63cfd380 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_VIRTIO) += virtio.o
 obj-$(CONFIG_VIRTIO_RING) += virtio_ring.o
+obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
 obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
new file mode 100644
index 000000000000..acc5e43c373e
--- /dev/null
+++ b/drivers/virtio/virtio_mmio.c
@@ -0,0 +1,479 @@
/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * Register layout (all registers 32-bit wide):
 *
 * offset d. name             description
 * ------ -- ---------------- -----------------
 *
 * 0x000  R  MagicValue       Magic value "virt"
 * 0x004  R  Version          Device version (current max. 1)
 * 0x008  R  DeviceID         Virtio device ID
 * 0x00c  R  VendorID         Virtio vendor ID
 *
 * 0x010  R  HostFeatures     Features supported by the host
 * 0x014  W  HostFeaturesSel  Set of host features to access via HostFeatures
 *
 * 0x020  W  GuestFeatures    Features activated by the guest
 * 0x024  W  GuestFeaturesSel Set of activated features to set via GuestFeatures
 * 0x028  W  GuestPageSize    Size of guest's memory page in bytes
 *
 * 0x030  W  QueueSel         Queue selector
 * 0x034  R  QueueNumMax      Maximum size of the currently selected queue
 * 0x038  W  QueueNum         Queue size for the currently selected queue
 * 0x03c  W  QueueAlign       Used Ring alignment for the current queue
 * 0x040  RW QueuePFN         PFN for the currently selected queue
 *
 * 0x050  W  QueueNotify      Queue notifier
 * 0x060  R  InterruptStatus  Interrupt status register
 * 0x064  W  InterruptACK     Interrupt acknowledge register
 * 0x070  RW Status           Device status register
 *
 * 0x100+ RW                  Device-specific configuration space
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_mmio.h>
#include <linux/virtio_ring.h>



/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE



#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the number of entries in the queue */
	unsigned int num;

	/* the index of the queue */
	int queue_index;

	/* the virtual address of the ring queue */
	void *queue;

	/* the list node for the virtqueues list */
	struct list_head node;
};



/* Configuration interface */

static u32 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* TODO: Features > 32 bits */
	writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL);

	return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES);
}

static void vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int i;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
		writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
		writel(vdev->features[i],
				vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
	}
}

static void vm_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
}

static void vm_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	const u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
}

static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}



/* Transport interface */

/* the notify function used when creating a virt queue */
static void vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	struct virtio_driver *vdrv = container_of(vm_dev->vdev.dev.driver,
			struct virtio_driver, driver);
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)
			&& vdrv && vdrv->config_changed) {
		vdrv->config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}



static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags, size;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}



static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags, size;
	int err;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}
	info->queue_index = index;

	/* Allocate pages for the queue - start with a queue as big as
	 * possible (limited by maximum size allowed by device), drop down
	 * to a minimal size, just big enough to fit descriptor table
	 * and two rings (which makes it "alignment_size * 2")
	 */
	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	while (1) {
		size = PAGE_ALIGN(vring_size(info->num,
				VIRTIO_MMIO_VRING_ALIGN));
		/* Already smallest possible allocation? */
		if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) {
			err = -ENOMEM;
			goto error_alloc_pages;
		}

		info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
		if (info->queue)
			break;

		info->num /= 2;
	}

	/* Activate the queue */
	writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	writel(VIRTIO_MMIO_VRING_ALIGN,
			vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
	writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	/* Create the vring */
	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN,
			vdev, info->queue, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_new_virtqueue:
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	free_pages_exact(info->queue, size);
error_alloc_pages:
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}

static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char *names[])
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}



static struct virtio_config_ops virtio_mmio_config_ops = {
	.get		= vm_get,
	.set		= vm_set,
	.get_status	= vm_get_status,
	.set_status	= vm_set_status,
	.reset		= vm_reset,
	.find_vqs	= vm_find_vqs,
	.del_vqs	= vm_del_vqs,
	.get_features	= vm_get_features,
	.finalize_features = vm_finalize_features,
};



/* Platform device */

static int __devinit virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	struct resource *mem;
	unsigned long magic;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
			resource_size(mem), pdev->name))
		return -EBUSY;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (vm_dev->base == NULL)
		return -EFAULT;

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (memcmp(&magic, "virt", 4) != 0) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version != 1) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	platform_set_drvdata(pdev, vm_dev);

	return register_virtio_device(&vm_dev->vdev);
}

static int __devexit virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}



/* Platform driver */

static struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);
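/*
 * A matching device tree node could look like this (a sketch only; the
 * unit address, region size and interrupt specifier are hypothetical
 * and depend on the platform and its interrupt controller binding):
 *
 *	virtio_block@10013000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x10013000 0x200>;
 *		interrupts = <42>;
 *	};
 */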

static struct platform_driver virtio_mmio_driver = {
	.probe		= virtio_mmio_probe,
	.remove		= __devexit_p(virtio_mmio_remove),
	.driver		= {
		.name	= "virtio-mmio",
		.owner	= THIS_MODULE,
		.of_match_table = virtio_mmio_match,
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");