diff options
author | Michael S. Tsirkin <mst@redhat.com> | 2014-12-07 11:41:16 -0500 |
---|---|---|
committer | Michael S. Tsirkin <mst@redhat.com> | 2014-12-09 14:42:04 -0500 |
commit | 38eb4a29a77fa1bdb67f4534bfe93716a64e85ad (patch) | |
tree | 0ed4027f2488431360026196b42557f7451d42c3 | |
parent | 6f8f23d63d57c8d93d699fc015eba9bf144479e9 (diff) |
virtio_pci: split out legacy device support
Move everything dealing with legacy devices out to virtio_pci_legacy.c.
Expose common code APIs in virtio_pci.h
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-rw-r--r-- | drivers/virtio/virtio_pci.c | 420 | ||||
-rw-r--r-- | drivers/virtio/virtio_pci.h | 133 | ||||
-rw-r--r-- | drivers/virtio/virtio_pci_legacy.c | 323 |
3 files changed, 468 insertions, 408 deletions
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 4de3cbc0746d..d73ceecaf1c3 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
@@ -14,179 +14,10 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/module.h> | 17 | #include "virtio_pci_legacy.c" |
18 | #include <linux/list.h> | ||
19 | #include <linux/pci.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/virtio.h> | ||
23 | #include <linux/virtio_config.h> | ||
24 | #include <linux/virtio_ring.h> | ||
25 | #include <linux/virtio_pci.h> | ||
26 | #include <linux/highmem.h> | ||
27 | #include <linux/spinlock.h> | ||
28 | |||
29 | MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>"); | ||
30 | MODULE_DESCRIPTION("virtio-pci"); | ||
31 | MODULE_LICENSE("GPL"); | ||
32 | MODULE_VERSION("1"); | ||
33 | |||
34 | struct virtio_pci_vq_info { | ||
35 | /* the actual virtqueue */ | ||
36 | struct virtqueue *vq; | ||
37 | |||
38 | /* the number of entries in the queue */ | ||
39 | int num; | ||
40 | |||
41 | /* the virtual address of the ring queue */ | ||
42 | void *queue; | ||
43 | |||
44 | /* the list node for the virtqueues list */ | ||
45 | struct list_head node; | ||
46 | |||
47 | /* MSI-X vector (or none) */ | ||
48 | unsigned msix_vector; | ||
49 | }; | ||
50 | |||
51 | /* Our device structure */ | ||
52 | struct virtio_pci_device { | ||
53 | struct virtio_device vdev; | ||
54 | struct pci_dev *pci_dev; | ||
55 | |||
56 | /* the IO mapping for the PCI config space */ | ||
57 | void __iomem *ioaddr; | ||
58 | |||
59 | /* the IO mapping for ISR operation */ | ||
60 | void __iomem *isr; | ||
61 | |||
62 | /* a list of queues so we can dispatch IRQs */ | ||
63 | spinlock_t lock; | ||
64 | struct list_head virtqueues; | ||
65 | |||
66 | /* array of all queues for house-keeping */ | ||
67 | struct virtio_pci_vq_info **vqs; | ||
68 | |||
69 | /* MSI-X support */ | ||
70 | int msix_enabled; | ||
71 | int intx_enabled; | ||
72 | struct msix_entry *msix_entries; | ||
73 | cpumask_var_t *msix_affinity_masks; | ||
74 | /* Name strings for interrupts. This size should be enough, | ||
75 | * and I'm too lazy to allocate each name separately. */ | ||
76 | char (*msix_names)[256]; | ||
77 | /* Number of available vectors */ | ||
78 | unsigned msix_vectors; | ||
79 | /* Vectors allocated, excluding per-vq vectors if any */ | ||
80 | unsigned msix_used_vectors; | ||
81 | |||
82 | /* Whether we have vector per vq */ | ||
83 | bool per_vq_vectors; | ||
84 | |||
85 | struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, | ||
86 | struct virtio_pci_vq_info *info, | ||
87 | unsigned idx, | ||
88 | void (*callback)(struct virtqueue *vq), | ||
89 | const char *name, | ||
90 | u16 msix_vec); | ||
91 | void (*del_vq)(struct virtio_pci_vq_info *info); | ||
92 | u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); | ||
93 | }; | ||
94 | |||
95 | /* Constants for MSI-X */ | ||
96 | /* Use first vector for configuration changes, second and the rest for | ||
97 | * virtqueues. Thus, we need at least 2 vectors for MSI. */ | ||
98 | enum { | ||
99 | VP_MSIX_CONFIG_VECTOR = 0, | ||
100 | VP_MSIX_VQ_VECTOR = 1, | ||
101 | }; | ||
102 | |||
103 | /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ | ||
104 | static const struct pci_device_id virtio_pci_id_table[] = { | ||
105 | { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, | ||
106 | { 0 } | ||
107 | }; | ||
108 | |||
109 | MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); | ||
110 | |||
111 | /* Convert a generic virtio device to our structure */ | ||
112 | static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) | ||
113 | { | ||
114 | return container_of(vdev, struct virtio_pci_device, vdev); | ||
115 | } | ||
116 | |||
117 | /* virtio config->get_features() implementation */ | ||
118 | static u64 vp_get_features(struct virtio_device *vdev) | ||
119 | { | ||
120 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
121 | |||
122 | /* When someone needs more than 32 feature bits, we'll need to | ||
123 | * steal a bit to indicate that the rest are somewhere else. */ | ||
124 | return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); | ||
125 | } | ||
126 | |||
127 | /* virtio config->finalize_features() implementation */ | ||
128 | static int vp_finalize_features(struct virtio_device *vdev) | ||
129 | { | ||
130 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
131 | |||
132 | /* Give virtio_ring a chance to accept features. */ | ||
133 | vring_transport_features(vdev); | ||
134 | |||
135 | /* Make sure we don't have any features > 32 bits! */ | ||
136 | BUG_ON((u32)vdev->features != vdev->features); | ||
137 | |||
138 | /* We only support 32 feature bits. */ | ||
139 | iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES); | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | /* virtio config->get() implementation */ | ||
145 | static void vp_get(struct virtio_device *vdev, unsigned offset, | ||
146 | void *buf, unsigned len) | ||
147 | { | ||
148 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
149 | void __iomem *ioaddr = vp_dev->ioaddr + | ||
150 | VIRTIO_PCI_CONFIG(vp_dev) + offset; | ||
151 | u8 *ptr = buf; | ||
152 | int i; | ||
153 | |||
154 | for (i = 0; i < len; i++) | ||
155 | ptr[i] = ioread8(ioaddr + i); | ||
156 | } | ||
157 | |||
158 | /* the config->set() implementation. it's symmetric to the config->get() | ||
159 | * implementation */ | ||
160 | static void vp_set(struct virtio_device *vdev, unsigned offset, | ||
161 | const void *buf, unsigned len) | ||
162 | { | ||
163 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
164 | void __iomem *ioaddr = vp_dev->ioaddr + | ||
165 | VIRTIO_PCI_CONFIG(vp_dev) + offset; | ||
166 | const u8 *ptr = buf; | ||
167 | int i; | ||
168 | |||
169 | for (i = 0; i < len; i++) | ||
170 | iowrite8(ptr[i], ioaddr + i); | ||
171 | } | ||
172 | |||
173 | /* config->{get,set}_status() implementations */ | ||
174 | static u8 vp_get_status(struct virtio_device *vdev) | ||
175 | { | ||
176 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
177 | return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); | ||
178 | } | ||
179 | |||
180 | static void vp_set_status(struct virtio_device *vdev, u8 status) | ||
181 | { | ||
182 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
183 | /* We should never be setting status to 0. */ | ||
184 | BUG_ON(status == 0); | ||
185 | iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); | ||
186 | } | ||
187 | 18 | ||
188 | /* wait for pending irq handlers */ | 19 | /* wait for pending irq handlers */ |
189 | static void vp_synchronize_vectors(struct virtio_device *vdev) | 20 | void vp_synchronize_vectors(struct virtio_device *vdev) |
190 | { | 21 | { |
191 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 22 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
192 | int i; | 23 | int i; |
@@ -198,20 +29,8 @@ static void vp_synchronize_vectors(struct virtio_device *vdev) | |||
198 | synchronize_irq(vp_dev->msix_entries[i].vector); | 29 | synchronize_irq(vp_dev->msix_entries[i].vector); |
199 | } | 30 | } |
200 | 31 | ||
201 | static void vp_reset(struct virtio_device *vdev) | ||
202 | { | ||
203 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
204 | /* 0 status means a reset. */ | ||
205 | iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); | ||
206 | /* Flush out the status write, and flush in device writes, | ||
207 | * including MSI-X interrupts, if any. */ | ||
208 | ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); | ||
209 | /* Flush pending VQ/configuration callbacks. */ | ||
210 | vp_synchronize_vectors(vdev); | ||
211 | } | ||
212 | |||
213 | /* the notify function used when creating a virt queue */ | 32 | /* the notify function used when creating a virt queue */ |
214 | static bool vp_notify(struct virtqueue *vq) | 33 | bool vp_notify(struct virtqueue *vq) |
215 | { | 34 | { |
216 | /* we write the queue's selector into the notification register to | 35 | /* we write the queue's selector into the notification register to |
217 | * signal the other end */ | 36 | * signal the other end */ |
@@ -272,15 +91,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) | |||
272 | return vp_vring_interrupt(irq, opaque); | 91 | return vp_vring_interrupt(irq, opaque); |
273 | } | 92 | } |
274 | 93 | ||
275 | static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) | ||
276 | { | ||
277 | /* Setup the vector used for configuration events */ | ||
278 | iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
279 | /* Verify we had enough resources to assign the vector */ | ||
280 | /* Will also flush the write out to device */ | ||
281 | return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
282 | } | ||
283 | |||
284 | static void vp_free_vectors(struct virtio_device *vdev) | 94 | static void vp_free_vectors(struct virtio_device *vdev) |
285 | { | 95 | { |
286 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 96 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
@@ -401,68 +211,6 @@ static int vp_request_intx(struct virtio_device *vdev) | |||
401 | return err; | 211 | return err; |
402 | } | 212 | } |
403 | 213 | ||
404 | static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, | ||
405 | struct virtio_pci_vq_info *info, | ||
406 | unsigned index, | ||
407 | void (*callback)(struct virtqueue *vq), | ||
408 | const char *name, | ||
409 | u16 msix_vec) | ||
410 | { | ||
411 | struct virtqueue *vq; | ||
412 | unsigned long size; | ||
413 | u16 num; | ||
414 | int err; | ||
415 | |||
416 | /* Select the queue we're interested in */ | ||
417 | iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); | ||
418 | |||
419 | /* Check if queue is either not available or already active. */ | ||
420 | num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); | ||
421 | if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) | ||
422 | return ERR_PTR(-ENOENT); | ||
423 | |||
424 | info->num = num; | ||
425 | info->msix_vector = msix_vec; | ||
426 | |||
427 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); | ||
428 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); | ||
429 | if (info->queue == NULL) | ||
430 | return ERR_PTR(-ENOMEM); | ||
431 | |||
432 | /* activate the queue */ | ||
433 | iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, | ||
434 | vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | ||
435 | |||
436 | /* create the vring */ | ||
437 | vq = vring_new_virtqueue(index, info->num, | ||
438 | VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, | ||
439 | true, info->queue, vp_notify, callback, name); | ||
440 | if (!vq) { | ||
441 | err = -ENOMEM; | ||
442 | goto out_activate_queue; | ||
443 | } | ||
444 | |||
445 | vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY; | ||
446 | |||
447 | if (msix_vec != VIRTIO_MSI_NO_VECTOR) { | ||
448 | iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | ||
449 | msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | ||
450 | if (msix_vec == VIRTIO_MSI_NO_VECTOR) { | ||
451 | err = -EBUSY; | ||
452 | goto out_assign; | ||
453 | } | ||
454 | } | ||
455 | |||
456 | return vq; | ||
457 | |||
458 | out_assign: | ||
459 | vring_del_virtqueue(vq); | ||
460 | out_activate_queue: | ||
461 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | ||
462 | free_pages_exact(info->queue, size); | ||
463 | return ERR_PTR(err); | ||
464 | } | ||
465 | |||
466 | static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, | 214 | static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, |
467 | void (*callback)(struct virtqueue *vq), | 215 | void (*callback)(struct virtqueue *vq), |
468 | const char *name, | 216 | const char *name, |
@@ -498,30 +246,6 @@ out_info: | |||
498 | return vq; | 246 | return vq; |
499 | } | 247 | } |
500 | 248 | ||
501 | static void del_vq(struct virtio_pci_vq_info *info) | ||
502 | { | ||
503 | struct virtqueue *vq = info->vq; | ||
504 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); | ||
505 | unsigned long size; | ||
506 | |||
507 | iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); | ||
508 | |||
509 | if (vp_dev->msix_enabled) { | ||
510 | iowrite16(VIRTIO_MSI_NO_VECTOR, | ||
511 | vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | ||
512 | /* Flush the write out to device */ | ||
513 | ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); | ||
514 | } | ||
515 | |||
516 | vring_del_virtqueue(vq); | ||
517 | |||
518 | /* Select and deactivate the queue */ | ||
519 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | ||
520 | |||
521 | size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); | ||
522 | free_pages_exact(info->queue, size); | ||
523 | } | ||
524 | |||
525 | static void vp_del_vq(struct virtqueue *vq) | 249 | static void vp_del_vq(struct virtqueue *vq) |
526 | { | 250 | { |
527 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); | 251 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); |
@@ -537,7 +261,7 @@ static void vp_del_vq(struct virtqueue *vq) | |||
537 | } | 261 | } |
538 | 262 | ||
539 | /* the config->del_vqs() implementation */ | 263 | /* the config->del_vqs() implementation */ |
540 | static void vp_del_vqs(struct virtio_device *vdev) | 264 | void vp_del_vqs(struct virtio_device *vdev) |
541 | { | 265 | { |
542 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 266 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
543 | struct virtqueue *vq, *n; | 267 | struct virtqueue *vq, *n; |
@@ -637,10 +361,10 @@ error_find: | |||
637 | } | 361 | } |
638 | 362 | ||
639 | /* the config->find_vqs() implementation */ | 363 | /* the config->find_vqs() implementation */ |
640 | static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, | 364 | int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
641 | struct virtqueue *vqs[], | 365 | struct virtqueue *vqs[], |
642 | vq_callback_t *callbacks[], | 366 | vq_callback_t *callbacks[], |
643 | const char *names[]) | 367 | const char *names[]) |
644 | { | 368 | { |
645 | int err; | 369 | int err; |
646 | 370 | ||
@@ -658,7 +382,7 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
658 | false, false); | 382 | false, false); |
659 | } | 383 | } |
660 | 384 | ||
661 | static const char *vp_bus_name(struct virtio_device *vdev) | 385 | const char *vp_bus_name(struct virtio_device *vdev) |
662 | { | 386 | { |
663 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 387 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
664 | 388 | ||
@@ -670,7 +394,7 @@ static const char *vp_bus_name(struct virtio_device *vdev) | |||
670 | * - OR over all affinities for shared MSI | 394 | * - OR over all affinities for shared MSI |
671 | * - ignore the affinity request if we're using INTX | 395 | * - ignore the affinity request if we're using INTX |
672 | */ | 396 | */ |
673 | static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) | 397 | int vp_set_vq_affinity(struct virtqueue *vq, int cpu) |
674 | { | 398 | { |
675 | struct virtio_device *vdev = vq->vdev; | 399 | struct virtio_device *vdev = vq->vdev; |
676 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 400 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
@@ -694,21 +418,7 @@ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) | |||
694 | return 0; | 418 | return 0; |
695 | } | 419 | } |
696 | 420 | ||
697 | static const struct virtio_config_ops virtio_pci_config_ops = { | 421 | void virtio_pci_release_dev(struct device *_d) |
698 | .get = vp_get, | ||
699 | .set = vp_set, | ||
700 | .get_status = vp_get_status, | ||
701 | .set_status = vp_set_status, | ||
702 | .reset = vp_reset, | ||
703 | .find_vqs = vp_find_vqs, | ||
704 | .del_vqs = vp_del_vqs, | ||
705 | .get_features = vp_get_features, | ||
706 | .finalize_features = vp_finalize_features, | ||
707 | .bus_name = vp_bus_name, | ||
708 | .set_vq_affinity = vp_set_vq_affinity, | ||
709 | }; | ||
710 | |||
711 | static void virtio_pci_release_dev(struct device *_d) | ||
712 | { | 422 | { |
713 | /* | 423 | /* |
714 | * No need for a release method as we allocate/free | 424 | * No need for a release method as we allocate/free |
@@ -717,100 +427,6 @@ static void virtio_pci_release_dev(struct device *_d) | |||
717 | */ | 427 | */ |
718 | } | 428 | } |
719 | 429 | ||
720 | /* the PCI probing function */ | ||
721 | static int virtio_pci_probe(struct pci_dev *pci_dev, | ||
722 | const struct pci_device_id *id) | ||
723 | { | ||
724 | struct virtio_pci_device *vp_dev; | ||
725 | int err; | ||
726 | |||
727 | /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ | ||
728 | if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) | ||
729 | return -ENODEV; | ||
730 | |||
731 | if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { | ||
732 | printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", | ||
733 | VIRTIO_PCI_ABI_VERSION, pci_dev->revision); | ||
734 | return -ENODEV; | ||
735 | } | ||
736 | |||
737 | /* allocate our structure and fill it out */ | ||
738 | vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); | ||
739 | if (vp_dev == NULL) | ||
740 | return -ENOMEM; | ||
741 | |||
742 | vp_dev->vdev.dev.parent = &pci_dev->dev; | ||
743 | vp_dev->vdev.dev.release = virtio_pci_release_dev; | ||
744 | vp_dev->vdev.config = &virtio_pci_config_ops; | ||
745 | vp_dev->pci_dev = pci_dev; | ||
746 | INIT_LIST_HEAD(&vp_dev->virtqueues); | ||
747 | spin_lock_init(&vp_dev->lock); | ||
748 | |||
749 | /* Disable MSI/MSIX to bring device to a known good state. */ | ||
750 | pci_msi_off(pci_dev); | ||
751 | |||
752 | /* enable the device */ | ||
753 | err = pci_enable_device(pci_dev); | ||
754 | if (err) | ||
755 | goto out; | ||
756 | |||
757 | err = pci_request_regions(pci_dev, "virtio-pci"); | ||
758 | if (err) | ||
759 | goto out_enable_device; | ||
760 | |||
761 | vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); | ||
762 | if (vp_dev->ioaddr == NULL) { | ||
763 | err = -ENOMEM; | ||
764 | goto out_req_regions; | ||
765 | } | ||
766 | |||
767 | vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR; | ||
768 | |||
769 | pci_set_drvdata(pci_dev, vp_dev); | ||
770 | pci_set_master(pci_dev); | ||
771 | |||
772 | /* we use the subsystem vendor/device id as the virtio vendor/device | ||
773 | * id. this allows us to use the same PCI vendor/device id for all | ||
774 | * virtio devices and to identify the particular virtio driver by | ||
775 | * the subsystem ids */ | ||
776 | vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; | ||
777 | vp_dev->vdev.id.device = pci_dev->subsystem_device; | ||
778 | |||
779 | vp_dev->config_vector = vp_config_vector; | ||
780 | vp_dev->setup_vq = setup_vq; | ||
781 | vp_dev->del_vq = del_vq; | ||
782 | |||
783 | /* finally register the virtio device */ | ||
784 | err = register_virtio_device(&vp_dev->vdev); | ||
785 | if (err) | ||
786 | goto out_set_drvdata; | ||
787 | |||
788 | return 0; | ||
789 | |||
790 | out_set_drvdata: | ||
791 | pci_iounmap(pci_dev, vp_dev->ioaddr); | ||
792 | out_req_regions: | ||
793 | pci_release_regions(pci_dev); | ||
794 | out_enable_device: | ||
795 | pci_disable_device(pci_dev); | ||
796 | out: | ||
797 | kfree(vp_dev); | ||
798 | return err; | ||
799 | } | ||
800 | |||
801 | static void virtio_pci_remove(struct pci_dev *pci_dev) | ||
802 | { | ||
803 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); | ||
804 | |||
805 | unregister_virtio_device(&vp_dev->vdev); | ||
806 | |||
807 | vp_del_vqs(&vp_dev->vdev); | ||
808 | pci_iounmap(pci_dev, vp_dev->ioaddr); | ||
809 | pci_release_regions(pci_dev); | ||
810 | pci_disable_device(pci_dev); | ||
811 | kfree(vp_dev); | ||
812 | } | ||
813 | |||
814 | #ifdef CONFIG_PM_SLEEP | 430 | #ifdef CONFIG_PM_SLEEP |
815 | static int virtio_pci_freeze(struct device *dev) | 431 | static int virtio_pci_freeze(struct device *dev) |
816 | { | 432 | { |
@@ -839,19 +455,7 @@ static int virtio_pci_restore(struct device *dev) | |||
839 | return virtio_device_restore(&vp_dev->vdev); | 455 | return virtio_device_restore(&vp_dev->vdev); |
840 | } | 456 | } |
841 | 457 | ||
842 | static const struct dev_pm_ops virtio_pci_pm_ops = { | 458 | const struct dev_pm_ops virtio_pci_pm_ops = { |
843 | SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) | 459 | SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) |
844 | }; | 460 | }; |
845 | #endif | 461 | #endif |
846 | |||
847 | static struct pci_driver virtio_pci_driver = { | ||
848 | .name = "virtio-pci", | ||
849 | .id_table = virtio_pci_id_table, | ||
850 | .probe = virtio_pci_probe, | ||
851 | .remove = virtio_pci_remove, | ||
852 | #ifdef CONFIG_PM_SLEEP | ||
853 | .driver.pm = &virtio_pci_pm_ops, | ||
854 | #endif | ||
855 | }; | ||
856 | |||
857 | module_pci_driver(virtio_pci_driver); | ||
diff --git a/drivers/virtio/virtio_pci.h b/drivers/virtio/virtio_pci.h new file mode 100644 index 000000000000..a3b12595d6c8 --- /dev/null +++ b/drivers/virtio/virtio_pci.h | |||
@@ -0,0 +1,133 @@ | |||
1 | #ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_H | ||
2 | #define _DRIVERS_VIRTIO_VIRTIO_PCI_H | ||
3 | /* | ||
4 | * Virtio PCI driver | ||
5 | * | ||
6 | * This module allows virtio devices to be used over a virtual PCI device. | ||
7 | * This can be used with QEMU based VMMs like KVM or Xen. | ||
8 | * | ||
9 | * Copyright IBM Corp. 2007 | ||
10 | * | ||
11 | * Authors: | ||
12 | * Anthony Liguori <aliguori@us.ibm.com> | ||
13 | * | ||
14 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
15 | * See the COPYING file in the top-level directory. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/virtio.h> | ||
25 | #include <linux/virtio_config.h> | ||
26 | #include <linux/virtio_ring.h> | ||
27 | #define VIRTIO_PCI_NO_LEGACY | ||
28 | #include <linux/virtio_pci.h> | ||
29 | #include <linux/highmem.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | |||
32 | struct virtio_pci_vq_info { | ||
33 | /* the actual virtqueue */ | ||
34 | struct virtqueue *vq; | ||
35 | |||
36 | /* the number of entries in the queue */ | ||
37 | int num; | ||
38 | |||
39 | /* the virtual address of the ring queue */ | ||
40 | void *queue; | ||
41 | |||
42 | /* the list node for the virtqueues list */ | ||
43 | struct list_head node; | ||
44 | |||
45 | /* MSI-X vector (or none) */ | ||
46 | unsigned msix_vector; | ||
47 | }; | ||
48 | |||
49 | /* Our device structure */ | ||
50 | struct virtio_pci_device { | ||
51 | struct virtio_device vdev; | ||
52 | struct pci_dev *pci_dev; | ||
53 | |||
54 | /* the IO mapping for the PCI config space */ | ||
55 | void __iomem *ioaddr; | ||
56 | |||
57 | /* the IO mapping for ISR operation */ | ||
58 | void __iomem *isr; | ||
59 | |||
60 | /* a list of queues so we can dispatch IRQs */ | ||
61 | spinlock_t lock; | ||
62 | struct list_head virtqueues; | ||
63 | |||
64 | /* array of all queues for house-keeping */ | ||
65 | struct virtio_pci_vq_info **vqs; | ||
66 | |||
67 | /* MSI-X support */ | ||
68 | int msix_enabled; | ||
69 | int intx_enabled; | ||
70 | struct msix_entry *msix_entries; | ||
71 | cpumask_var_t *msix_affinity_masks; | ||
72 | /* Name strings for interrupts. This size should be enough, | ||
73 | * and I'm too lazy to allocate each name separately. */ | ||
74 | char (*msix_names)[256]; | ||
75 | /* Number of available vectors */ | ||
76 | unsigned msix_vectors; | ||
77 | /* Vectors allocated, excluding per-vq vectors if any */ | ||
78 | unsigned msix_used_vectors; | ||
79 | |||
80 | /* Whether we have vector per vq */ | ||
81 | bool per_vq_vectors; | ||
82 | |||
83 | struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, | ||
84 | struct virtio_pci_vq_info *info, | ||
85 | unsigned idx, | ||
86 | void (*callback)(struct virtqueue *vq), | ||
87 | const char *name, | ||
88 | u16 msix_vec); | ||
89 | void (*del_vq)(struct virtio_pci_vq_info *info); | ||
90 | |||
91 | u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); | ||
92 | }; | ||
93 | |||
94 | /* Constants for MSI-X */ | ||
95 | /* Use first vector for configuration changes, second and the rest for | ||
96 | * virtqueues. Thus, we need at least 2 vectors for MSI. */ | ||
97 | enum { | ||
98 | VP_MSIX_CONFIG_VECTOR = 0, | ||
99 | VP_MSIX_VQ_VECTOR = 1, | ||
100 | }; | ||
101 | |||
102 | /* Convert a generic virtio device to our structure */ | ||
103 | static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) | ||
104 | { | ||
105 | return container_of(vdev, struct virtio_pci_device, vdev); | ||
106 | } | ||
107 | |||
108 | /* wait for pending irq handlers */ | ||
109 | void vp_synchronize_vectors(struct virtio_device *vdev); | ||
110 | /* the notify function used when creating a virt queue */ | ||
111 | bool vp_notify(struct virtqueue *vq); | ||
112 | /* the config->del_vqs() implementation */ | ||
113 | void vp_del_vqs(struct virtio_device *vdev); | ||
114 | /* the config->find_vqs() implementation */ | ||
115 | int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, | ||
116 | struct virtqueue *vqs[], | ||
117 | vq_callback_t *callbacks[], | ||
118 | const char *names[]); | ||
119 | const char *vp_bus_name(struct virtio_device *vdev); | ||
120 | |||
121 | /* Setup the affinity for a virtqueue: | ||
122 | * - force the affinity for per vq vector | ||
123 | * - OR over all affinities for shared MSI | ||
124 | * - ignore the affinity request if we're using INTX | ||
125 | */ | ||
126 | int vp_set_vq_affinity(struct virtqueue *vq, int cpu); | ||
127 | void virtio_pci_release_dev(struct device *); | ||
128 | |||
129 | #ifdef CONFIG_PM_SLEEP | ||
130 | extern const struct dev_pm_ops virtio_pci_pm_ops; | ||
131 | #endif | ||
132 | |||
133 | #endif | ||
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c new file mode 100644 index 000000000000..c3393d47097f --- /dev/null +++ b/drivers/virtio/virtio_pci_legacy.c | |||
@@ -0,0 +1,323 @@ | |||
1 | /* | ||
2 | * Virtio PCI driver | ||
3 | * | ||
4 | * This module allows virtio devices to be used over a virtual PCI device. | ||
5 | * This can be used with QEMU based VMMs like KVM or Xen. | ||
6 | * | ||
7 | * Copyright IBM Corp. 2007 | ||
8 | * | ||
9 | * Authors: | ||
10 | * Anthony Liguori <aliguori@us.ibm.com> | ||
11 | * | ||
12 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
13 | * See the COPYING file in the top-level directory. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include "virtio_pci.h" | ||
18 | |||
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF.
 * Match the whole vendor range here; probe() filters the exact
 * device-id window this legacy driver owns. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(0x1af4, PCI_ANY_ID) },
	{ 0 }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
26 | |||
/* virtio config->get_features() implementation: read the 32-bit
 * host feature register from the legacy I/O window. */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* When someone needs more than 32 feature bits, we'll need to
	 * steal a bit to indicate that the rest are somewhere else. */
	return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}
36 | |||
/* virtio config->finalize_features() implementation: write the guest's
 * accepted feature bits back to the device. Returns 0 on success. */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits!
	 * The legacy register is only 32 bits wide. */
	BUG_ON((u32)vdev->features != vdev->features);

	/* We only support 32 feature bits. */
	iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);

	return 0;
}
53 | |||
54 | /* virtio config->get() implementation */ | ||
55 | static void vp_get(struct virtio_device *vdev, unsigned offset, | ||
56 | void *buf, unsigned len) | ||
57 | { | ||
58 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
59 | void __iomem *ioaddr = vp_dev->ioaddr + | ||
60 | VIRTIO_PCI_CONFIG(vp_dev) + offset; | ||
61 | u8 *ptr = buf; | ||
62 | int i; | ||
63 | |||
64 | for (i = 0; i < len; i++) | ||
65 | ptr[i] = ioread8(ioaddr + i); | ||
66 | } | ||
67 | |||
68 | /* the config->set() implementation. it's symmetric to the config->get() | ||
69 | * implementation */ | ||
70 | static void vp_set(struct virtio_device *vdev, unsigned offset, | ||
71 | const void *buf, unsigned len) | ||
72 | { | ||
73 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
74 | void __iomem *ioaddr = vp_dev->ioaddr + | ||
75 | VIRTIO_PCI_CONFIG(vp_dev) + offset; | ||
76 | const u8 *ptr = buf; | ||
77 | int i; | ||
78 | |||
79 | for (i = 0; i < len; i++) | ||
80 | iowrite8(ptr[i], ioaddr + i); | ||
81 | } | ||
82 | |||
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
89 | |||
/* config->set_status() implementation: write the device status byte. */
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0.
	 * That means a reset: callers must use config->reset() instead. */
	BUG_ON(status == 0);
	iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
97 | |||
/* config->reset() implementation: reset the device and quiesce callbacks.
 * The read-back/synchronize ordering below is load-bearing. */
static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any. */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}
109 | |||
/* Assign the MSI-X vector used for configuration-change interrupts.
 * Returns the vector the device actually accepted (VIRTIO_MSI_NO_VECTOR
 * on allocation failure), so the caller must check the result. */
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
}
118 | |||
/* Allocate and activate one legacy virtqueue.
 *
 * Selects queue @index on the device, reads its fixed size, allocates the
 * page-aligned ring memory, programs the ring PFN into the device and, when
 * MSI-X is in use, binds the queue to @msix_vec.  @info is filled in with
 * the ring bookkeeping used later by del_vq().
 * Returns the new virtqueue or an ERR_PTR on failure.
 */
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  u16 msix_vec)
{
	struct virtqueue *vq;
	unsigned long size;
	u16 num;
	int err;

	/* Select the queue we're interested in */
	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	/* Check if queue is either not available or already active. */
	num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
		return ERR_PTR(-ENOENT);

	info->num = num;
	info->msix_vector = msix_vec;

	/* Legacy devices expect a physically contiguous, zeroed ring. */
	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
	if (info->queue == NULL)
		return ERR_PTR(-ENOMEM);

	/* activate the queue: the device takes a page frame number */
	iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* create the vring */
	vq = vring_new_virtqueue(index, info->num,
				 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
				 true, info->queue, vp_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto out_activate_queue;
	}

	/* kicks are a write to the queue-notify register (see vp_notify) */
	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Read back: the device reports VIRTIO_MSI_NO_VECTOR when it
		 * could not allocate resources for this vector. */
		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto out_assign;
		}
	}

	return vq;

out_assign:
	vring_del_virtqueue(vq);
out_activate_queue:
	/* Writing PFN 0 deactivates the queue before we free its memory. */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
	free_pages_exact(info->queue, size);
	return ERR_PTR(err);
}
180 | |||
/* Tear down one virtqueue created by setup_vq(): unbind its MSI-X vector,
 * delete the vring, deactivate the queue on the device and free the ring
 * memory.  The ordering (vector first, PFN last) mirrors setup_vq(). */
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	unsigned long size;

	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* size must match the PAGE_ALIGN'd allocation in setup_vq() */
	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
	free_pages_exact(info->queue, size);
}
204 | |||
/* Legacy transport ops table: get/set/status/reset are implemented above;
 * the vq and bus helpers are the shared vp_* implementations from
 * virtio_pci.h. */
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};
218 | |||
/* the PCI probing function: enable the PCI device, map its legacy I/O
 * window, wire up the legacy transport callbacks and register the virtio
 * device.  On any failure the goto chain unwinds in reverse order. */
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev;
	int err;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	/* Legacy ABI is tied to revision 0; refuse anything newer. */
	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (vp_dev == NULL)
		return -ENOMEM;

	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->vdev.config = &virtio_pci_config_ops;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* Disable MSI/MSIX to bring device to a known good state. */
	pci_msi_off(pci_dev);

	/* enable the device */
	err = pci_enable_device(pci_dev);
	if (err)
		goto out;

	err = pci_request_regions(pci_dev, "virtio-pci");
	if (err)
		goto out_enable_device;

	/* Map BAR 0 entirely (len 0 == whole BAR): the legacy I/O window. */
	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (vp_dev->ioaddr == NULL) {
		err = -ENOMEM;
		goto out_req_regions;
	}

	vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

	pci_set_drvdata(pci_dev, vp_dev);
	pci_set_master(pci_dev);

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id. this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;

	/* legacy-specific transport hooks used by the common vp_* code */
	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	/* finally register the virtio device */
	err = register_virtio_device(&vp_dev->vdev);
	if (err)
		goto out_set_drvdata;

	return 0;

out_set_drvdata:
	pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
	pci_release_regions(pci_dev);
out_enable_device:
	pci_disable_device(pci_dev);
out:
	kfree(vp_dev);
	return err;
}
299 | |||
/* PCI remove callback: unregister the virtio device, then release the
 * transport resources in reverse order of acquisition in probe(). */
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	vp_del_vqs(&vp_dev->vdev);
	pci_iounmap(pci_dev, vp_dev->ioaddr);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	kfree(vp_dev);
}
312 | |||
/* PCI driver glue; module_pci_driver() generates the module init/exit
 * that registers and unregisters this driver. */
static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);