Diffstat (limited to 'virt')

 -rw-r--r--  virt/kvm/assigned-dev.c     62
 -rw-r--r--  virt/kvm/coalesced_mmio.c  131
 -rw-r--r--  virt/kvm/coalesced_mmio.h    7
 -rw-r--r--  virt/kvm/eventfd.c           3
 -rw-r--r--  virt/kvm/ioapic.c            3
 -rw-r--r--  virt/kvm/kvm_main.c        112

 6 files changed, 200 insertions, 118 deletions
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index eaf3a50f9769..3ad0925d23a9 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -58,8 +58,6 @@ static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
 static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
 {
 	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
-	u32 vector;
-	int index;
 
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
 		spin_lock(&assigned_dev->intx_lock);
@@ -68,31 +66,35 @@ static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
 		spin_unlock(&assigned_dev->intx_lock);
 	}
 
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		index = find_index_from_host_irq(assigned_dev, irq);
-		if (index >= 0) {
-			vector = assigned_dev->
-				guest_msix_entries[index].vector;
-			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id, vector, 1);
-		}
-	} else
-		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-			    assigned_dev->guest_irq, 1);
+	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+		    assigned_dev->guest_irq, 1);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
+{
+	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+	int index = find_index_from_host_irq(assigned_dev, irq);
+	u32 vector;
+
+	if (index >= 0) {
+		vector = assigned_dev->guest_msix_entries[index].vector;
+		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+			    vector, 1);
+	}
 
 	return IRQ_HANDLED;
 }
+#endif
 
 /* Ack the irq line for an assigned device */
 static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
-	struct kvm_assigned_dev_kernel *dev;
-
-	if (kian->gsi == -1)
-		return;
-
-	dev = container_of(kian, struct kvm_assigned_dev_kernel,
-			   ack_notifier);
+	struct kvm_assigned_dev_kernel *dev =
+		container_of(kian, struct kvm_assigned_dev_kernel,
+			     ack_notifier);
 
 	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
 
@@ -110,8 +112,9 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 static void deassign_guest_irq(struct kvm *kvm,
 			       struct kvm_assigned_dev_kernel *assigned_dev)
 {
-	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
-	assigned_dev->ack_notifier.gsi = -1;
+	if (assigned_dev->ack_notifier.gsi != -1)
+		kvm_unregister_irq_ack_notifier(kvm,
+						&assigned_dev->ack_notifier);
 
 	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
 		    assigned_dev->guest_irq, 0);
@@ -143,7 +146,7 @@ static void deassign_host_irq(struct kvm *kvm,
 
 	for (i = 0; i < assigned_dev->entries_nr; i++)
 		free_irq(assigned_dev->host_msix_entries[i].vector,
-			 (void *)assigned_dev);
+			 assigned_dev);
 
 	assigned_dev->entries_nr = 0;
 	kfree(assigned_dev->host_msix_entries);
@@ -153,7 +156,7 @@ static void deassign_host_irq(struct kvm *kvm,
 	/* Deal with MSI and INTx */
 	disable_irq(assigned_dev->host_irq);
 
-	free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+	free_irq(assigned_dev->host_irq, assigned_dev);
 
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
 		pci_disable_msi(assigned_dev->dev);
@@ -239,7 +242,7 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
 	 * are going to be long delays in accepting, acking, etc.
 	 */
 	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
-				 IRQF_ONESHOT, dev->irq_name, (void *)dev))
+				 IRQF_ONESHOT, dev->irq_name, dev))
 		return -EIO;
 	return 0;
 }
@@ -258,7 +261,7 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
 
 	dev->host_irq = dev->dev->irq;
 	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
-				 0, dev->irq_name, (void *)dev)) {
+				 0, dev->irq_name, dev)) {
 		pci_disable_msi(dev->dev);
 		return -EIO;
 	}
@@ -284,8 +287,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 
 	for (i = 0; i < dev->entries_nr; i++) {
 		r = request_threaded_irq(dev->host_msix_entries[i].vector,
-					 NULL, kvm_assigned_dev_thread,
-					 0, dev->irq_name, (void *)dev);
+					 NULL, kvm_assigned_dev_thread_msix,
+					 0, dev->irq_name, dev);
 		if (r)
 			goto err;
 	}
@@ -293,7 +296,7 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 	return 0;
 err:
 	for (i -= 1; i >= 0; i--)
-		free_irq(dev->host_msix_entries[i].vector, (void *)dev);
+		free_irq(dev->host_msix_entries[i].vector, dev);
 	pci_disable_msix(dev->dev);
 	return r;
 }
@@ -406,7 +409,8 @@ static int assign_guest_irq(struct kvm *kvm,
 
 	if (!r) {
 		dev->irq_requested_type |= guest_irq_type;
-		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
+		if (dev->ack_notifier.gsi != -1)
+			kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
 	} else
 		kvm_free_irq_source_id(kvm, dev->irq_source_id);
 
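Aside: the new kvm_assigned_dev_thread_msix() handler above relies on
find_index_from_host_irq() to translate the host MSI-X vector that fired back
into the guest's MSI-X vector before injecting it with kvm_set_irq(). Below is
a small standalone sketch of that mapping; the table layout, values and helper
signature are made up for illustration and are not the kernel's definitions.

#include <stdio.h>

struct host_msix_entry  { unsigned int vector; unsigned int entry; };
struct guest_msix_entry { unsigned int vector; unsigned int entry; };

/* Mirror of the lookup idea: scan the host table for the vector that
 * fired and return its index, or -1 if the interrupt is unknown. */
static int find_index_from_host_irq(const struct host_msix_entry *host,
				    int nr, int irq)
{
	int i;

	for (i = 0; i < nr; i++)
		if (host[i].vector == (unsigned int)irq)
			return i;
	return -1;
}

int main(void)
{
	struct host_msix_entry  host[]  = { { 42, 0 }, { 43, 1 } };
	struct guest_msix_entry guest[] = { { 0x31, 0 }, { 0x32, 1 } };
	int irq = 43;
	int index = find_index_from_host_irq(host, 2, irq);

	if (index >= 0)	/* same guard as the handler above */
		printf("host irq %d -> guest vector 0x%x\n",
		       irq, guest[index].vector);
	return 0;
}
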
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index fc8487564d1f..a6ec206f36ba 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -24,10 +24,19 @@ static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
 static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 				   gpa_t addr, int len)
 {
-	struct kvm_coalesced_mmio_zone *zone;
+	/* is it in a batchable area ?
+	 * (addr,len) is fully included in
+	 * (zone->addr, zone->size)
+	 */
+
+	return (dev->zone.addr <= addr &&
+		addr + len <= dev->zone.addr + dev->zone.size);
+}
+
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+{
 	struct kvm_coalesced_mmio_ring *ring;
 	unsigned avail;
-	int i;
 
 	/* Are we able to batch it ? */
 
@@ -37,25 +46,12 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
 	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail < KVM_MAX_VCPUS) {
+	if (avail == 0) {
 		/* full */
 		return 0;
 	}
 
-	/* is it in a batchable area ? */
-
-	for (i = 0; i < dev->nb_zones; i++) {
-		zone = &dev->zone[i];
-
-		/* (addr,len) is fully included in
-		 * (zone->addr, zone->size)
-		 */
-
-		if (zone->addr <= addr &&
-		    addr + len <= zone->addr + zone->size)
-			return 1;
-	}
-	return 0;
+	return 1;
 }
 
 static int coalesced_mmio_write(struct kvm_io_device *this,
@@ -63,10 +59,16 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
-	spin_lock(&dev->lock);
+	spin_lock(&dev->kvm->ring_lock);
+
+	if (!coalesced_mmio_has_room(dev)) {
+		spin_unlock(&dev->kvm->ring_lock);
+		return -EOPNOTSUPP;
+	}
 
 	/* copy data in first free entry of the ring */
 
@@ -75,7 +77,7 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
 	smp_wmb();
 	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
-	spin_unlock(&dev->lock);
+	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
 
@@ -83,6 +85,8 @@ static void coalesced_mmio_destructor(struct kvm_io_device *this)
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 
+	list_del(&dev->list);
+
 	kfree(dev);
 }
 
@@ -93,7 +97,6 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
 
 int kvm_coalesced_mmio_init(struct kvm *kvm)
 {
-	struct kvm_coalesced_mmio_dev *dev;
 	struct page *page;
 	int ret;
 
@@ -101,31 +104,18 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page)
 		goto out_err;
-	kvm->coalesced_mmio_ring = page_address(page);
-
-	ret = -ENOMEM;
-	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
-	if (!dev)
-		goto out_free_page;
-	spin_lock_init(&dev->lock);
-	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
-	dev->kvm = kvm;
-	kvm->coalesced_mmio_dev = dev;
 
-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
-	mutex_unlock(&kvm->slots_lock);
-	if (ret < 0)
-		goto out_free_dev;
+	ret = 0;
+	kvm->coalesced_mmio_ring = page_address(page);
 
-	return ret;
+	/*
+	 * We're using this spinlock to sync access to the coalesced ring.
+	 * The list doesn't need it's own lock since device registration and
+	 * unregistration should only happen when kvm->slots_lock is held.
+	 */
+	spin_lock_init(&kvm->ring_lock);
+	INIT_LIST_HEAD(&kvm->coalesced_zones);
 
-out_free_dev:
-	kvm->coalesced_mmio_dev = NULL;
-	kfree(dev);
-out_free_page:
-	kvm->coalesced_mmio_ring = NULL;
-	__free_page(page);
 out_err:
 	return ret;
 }
@@ -139,51 +129,50 @@ void kvm_coalesced_mmio_free(struct kvm *kvm)
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
 					 struct kvm_coalesced_mmio_zone *zone)
 {
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+	int ret;
+	struct kvm_coalesced_mmio_dev *dev;
 
-	if (dev == NULL)
-		return -ENXIO;
+	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
+	dev->kvm = kvm;
+	dev->zone = *zone;
 
 	mutex_lock(&kvm->slots_lock);
-	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
-		mutex_unlock(&kvm->slots_lock);
-		return -ENOBUFS;
-	}
+	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
+				      zone->size, &dev->dev);
+	if (ret < 0)
+		goto out_free_dev;
+	list_add_tail(&dev->list, &kvm->coalesced_zones);
+	mutex_unlock(&kvm->slots_lock);
 
-	dev->zone[dev->nb_zones] = *zone;
-	dev->nb_zones++;
+	return ret;
 
+out_free_dev:
 	mutex_unlock(&kvm->slots_lock);
+
+	kfree(dev);
+
+	if (dev == NULL)
+		return -ENXIO;
+
 	return 0;
 }
 
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 					   struct kvm_coalesced_mmio_zone *zone)
 {
-	int i;
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
-	struct kvm_coalesced_mmio_zone *z;
-
-	if (dev == NULL)
-		return -ENXIO;
+	struct kvm_coalesced_mmio_dev *dev, *tmp;
 
 	mutex_lock(&kvm->slots_lock);
 
-	i = dev->nb_zones;
-	while (i) {
-		z = &dev->zone[i - 1];
-
-		/* unregister all zones
-		 * included in (zone->addr, zone->size)
-		 */
-
-		if (zone->addr <= z->addr &&
-		    z->addr + z->size <= zone->addr + zone->size) {
-			dev->nb_zones--;
-			*z = dev->zone[dev->nb_zones];
+	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+			kvm_iodevice_destructor(&dev->dev);
 		}
-		i--;
-	}
 
 	mutex_unlock(&kvm->slots_lock);
 
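Aside: coalesced_mmio_has_room() above uses the usual one-slot-free ring
convention, where (first - last - 1) modulo the ring size is the number of free
entries and a result of zero means "full". Below is a standalone sketch of that
arithmetic; RING_MAX and the helper are illustrative only (not kernel code),
and the unsigned wrap-around trick assumes a power-of-two ring size.

#include <assert.h>

#define RING_MAX 64u	/* stands in for KVM_COALESCED_MMIO_MAX */

/* first = next entry the consumer will read, last = next entry the
 * producer will write; both stay in [0, RING_MAX). */
static unsigned int ring_free_slots(unsigned int first, unsigned int last)
{
	return (first - last - 1) % RING_MAX;
}

int main(void)
{
	assert(ring_free_slots(0, 0) == RING_MAX - 1);	/* empty ring */
	assert(ring_free_slots(0, 5) == RING_MAX - 6);	/* five entries queued */
	assert(ring_free_slots(5, 4) == 0);		/* full: one slot kept free */
	return 0;
}
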
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 8a5959e3535f..b280c20444d1 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -12,14 +12,13 @@
 
 #ifdef CONFIG_KVM_MMIO
 
-#define KVM_COALESCED_MMIO_ZONE_MAX 100
+#include <linux/list.h>
 
 struct kvm_coalesced_mmio_dev {
+	struct list_head list;
 	struct kvm_io_device dev;
 	struct kvm *kvm;
-	spinlock_t lock;
-	int nb_zones;
-	struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
+	struct kvm_coalesced_mmio_zone zone;
 };
 
 int kvm_coalesced_mmio_init(struct kvm *kvm);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 73358d256fa2..f59c1e8de7a2 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -586,7 +586,8 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 	kvm_iodevice_init(&p->dev, &ioeventfd_ops);
 
-	ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
+	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
+				      &p->dev);
 	if (ret < 0)
 		goto unlock_fail;
 
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 8df1ca104a7f..3eed61eb4867 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -394,7 +394,8 @@ int kvm_ioapic_init(struct kvm *kvm)
 	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
 	ioapic->kvm = kvm;
 	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
+	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
+				      IOAPIC_MEM_LENGTH, &ioapic->dev);
 	mutex_unlock(&kvm->slots_lock);
 	if (ret < 0) {
 		kvm->arch.vioapic = NULL;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index aefdda390f5e..d9cfb782cb81 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -47,6 +47,8 @@
 #include <linux/srcu.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/bsearch.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
@@ -2391,24 +2393,92 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 	int i;
 
 	for (i = 0; i < bus->dev_count; i++) {
-		struct kvm_io_device *pos = bus->devs[i];
+		struct kvm_io_device *pos = bus->range[i].dev;
 
 		kvm_iodevice_destructor(pos);
 	}
 	kfree(bus);
 }
 
+int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
+{
+	const struct kvm_io_range *r1 = p1;
+	const struct kvm_io_range *r2 = p2;
+
+	if (r1->addr < r2->addr)
+		return -1;
+	if (r1->addr + r1->len > r2->addr + r2->len)
+		return 1;
+	return 0;
+}
+
+int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
+			  gpa_t addr, int len)
+{
+	if (bus->dev_count == NR_IOBUS_DEVS)
+		return -ENOSPC;
+
+	bus->range[bus->dev_count++] = (struct kvm_io_range) {
+		.addr = addr,
+		.len = len,
+		.dev = dev,
+	};
+
+	sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
+	     kvm_io_bus_sort_cmp, NULL);
+
+	return 0;
+}
+
+int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
+			     gpa_t addr, int len)
+{
+	struct kvm_io_range *range, key;
+	int off;
+
+	key = (struct kvm_io_range) {
+		.addr = addr,
+		.len = len,
+	};
+
+	range = bsearch(&key, bus->range, bus->dev_count,
+			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
+	if (range == NULL)
+		return -ENOENT;
+
+	off = range - bus->range;
+
+	while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
+		off--;
+
+	return off;
+}
+
 /* kvm_io_bus_write - called under kvm->slots_lock */
 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		     int len, const void *val)
 {
-	int i;
+	int idx;
 	struct kvm_io_bus *bus;
+	struct kvm_io_range range;
+
+	range = (struct kvm_io_range) {
+		.addr = addr,
+		.len = len,
+	};
 
 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
-	for (i = 0; i < bus->dev_count; i++)
-		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
+	idx = kvm_io_bus_get_first_dev(bus, addr, len);
+	if (idx < 0)
+		return -EOPNOTSUPP;
+
+	while (idx < bus->dev_count &&
+	       kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
+		if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
 			return 0;
+		idx++;
+	}
+
 	return -EOPNOTSUPP;
 }
 
@@ -2416,19 +2486,33 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		    int len, void *val)
 {
-	int i;
+	int idx;
 	struct kvm_io_bus *bus;
+	struct kvm_io_range range;
+
+	range = (struct kvm_io_range) {
+		.addr = addr,
+		.len = len,
+	};
 
 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
-	for (i = 0; i < bus->dev_count; i++)
-		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
+	idx = kvm_io_bus_get_first_dev(bus, addr, len);
+	if (idx < 0)
+		return -EOPNOTSUPP;
+
+	while (idx < bus->dev_count &&
+	       kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
+		if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
 			return 0;
+		idx++;
+	}
+
 	return -EOPNOTSUPP;
 }
 
 /* Caller must hold slots_lock. */
-int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-			    struct kvm_io_device *dev)
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+			    int len, struct kvm_io_device *dev)
 {
 	struct kvm_io_bus *new_bus, *bus;
 
@@ -2440,7 +2524,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	if (!new_bus)
 		return -ENOMEM;
 	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
-	new_bus->devs[new_bus->dev_count++] = dev;
+	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
 	kfree(bus);
@@ -2464,9 +2548,13 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 
 	r = -ENOENT;
 	for (i = 0; i < new_bus->dev_count; i++)
-		if (new_bus->devs[i] == dev) {
+		if (new_bus->range[i].dev == dev) {
 			r = 0;
-			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
+			new_bus->dev_count--;
+			new_bus->range[i] = new_bus->range[new_bus->dev_count];
+			sort(new_bus->range, new_bus->dev_count,
+			     sizeof(struct kvm_io_range),
+			     kvm_io_bus_sort_cmp, NULL);
 			break;
 		}
 
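Aside: the lookup scheme introduced in kvm_main.c above keeps the bus ranges
sorted, bsearch()es for any range that contains the access, then walks back to
the first equal entry so every matching registration gets a chance to claim the
access. Below is a small userspace sketch of the same idea using the C
library's qsort() and bsearch(); the io_range type and the addresses are made
up for illustration.

#include <stdio.h>
#include <stdlib.h>

struct io_range {
	unsigned long addr;
	int len;
};

/* Same shape as kvm_io_bus_sort_cmp(): "equal" means the probed
 * (addr, len) falls entirely inside the registered range. */
static int range_cmp(const void *p1, const void *p2)
{
	const struct io_range *r1 = p1, *r2 = p2;

	if (r1->addr < r2->addr)
		return -1;
	if (r1->addr + r1->len > r2->addr + r2->len)
		return 1;
	return 0;
}

int main(void)
{
	struct io_range ranges[] = {
		{ 0x1000, 0x100 }, { 0x3000, 0x10 }, { 0x2000, 0x200 },
	};
	size_t n = sizeof(ranges) / sizeof(ranges[0]);
	struct io_range key = { 0x2040, 4 };	/* a 4-byte access */
	struct io_range *hit;

	qsort(ranges, n, sizeof(ranges[0]), range_cmp);
	hit = bsearch(&key, ranges, n, sizeof(ranges[0]), range_cmp);
	if (hit) {
		size_t off = hit - ranges;

		/* back up to the first entry that also matches */
		while (off > 0 && range_cmp(&key, &ranges[off - 1]) == 0)
			off--;
		printf("first matching range: 0x%lx+0x%x\n",
		       ranges[off].addr, ranges[off].len);
	}
	return 0;
}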