Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/assigned-dev.c   13
-rw-r--r--  virt/kvm/eventfd.c        23
-rw-r--r--  virt/kvm/ioapic.c        163
-rw-r--r--  virt/kvm/ioapic.h         27
-rw-r--r--  virt/kvm/irq_comm.c       35
-rw-r--r--  virt/kvm/kvm_main.c       24
6 files changed, 203 insertions(+), 82 deletions(-)
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 3642239252b0..f4c7f591b5d8 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -80,11 +80,12 @@ kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
         spin_lock(&assigned_dev->intx_mask_lock);
         if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
             kvm_set_irq(assigned_dev->kvm,
-                    assigned_dev->irq_source_id, vector, 1);
+                    assigned_dev->irq_source_id, vector, 1,
+                    false);
         spin_unlock(&assigned_dev->intx_mask_lock);
     } else
         kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-                vector, 1);
+                vector, 1, false);
 }
 
 static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
@@ -165,7 +166,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
         container_of(kian, struct kvm_assigned_dev_kernel,
                 ack_notifier);
 
-    kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
+    kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);
 
     spin_lock(&dev->intx_mask_lock);
 
@@ -188,7 +189,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 
         if (reassert)
             kvm_set_irq(dev->kvm, dev->irq_source_id,
-                    dev->guest_irq, 1);
+                    dev->guest_irq, 1, false);
     }
 
     spin_unlock(&dev->intx_mask_lock);
@@ -202,7 +203,7 @@ static void deassign_guest_irq(struct kvm *kvm,
             &assigned_dev->ack_notifier);
 
     kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-            assigned_dev->guest_irq, 0);
+            assigned_dev->guest_irq, 0, false);
 
     if (assigned_dev->irq_source_id != -1)
         kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
@@ -901,7 +902,7 @@ static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
     if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
         if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
             kvm_set_irq(match->kvm, match->irq_source_id,
-                    match->guest_irq, 0);
+                    match->guest_irq, 0, false);
             /*
              * Masking at hardware-level is performed on demand,
              * i.e. when an IRQ actually arrives at the host.
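
Every assigned-device call site above passes false for the new final argument: host device interrupts carry no userspace-reported line status. A minimal sketch of the updated contract (the signature is the one introduced in the irq_comm.c hunk of this series; the comment is a paraphrase, not patch text):

/*
 * kvm_set_irq() gains a final "line_status" flag saying whether the
 * caller can make use of coalescing feedback.  Assigned devices
 * cannot, so they always pass false.
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
        bool line_status);

/* typical updated call site in this file: */
kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 1, false);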
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 020522ed9094..c5d43ffbf1f3 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -100,11 +100,13 @@ irqfd_inject(struct work_struct *work)
     struct kvm *kvm = irqfd->kvm;
 
     if (!irqfd->resampler) {
-        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
-        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
+        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
+                false);
+        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
+                false);
     } else
         kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-                irqfd->gsi, 1);
+                irqfd->gsi, 1, false);
 }
 
 /*
@@ -121,7 +123,7 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
     resampler = container_of(kian, struct _irqfd_resampler, notifier);
 
     kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-            resampler->notifier.gsi, 0);
+            resampler->notifier.gsi, 0, false);
 
     rcu_read_lock();
 
@@ -146,7 +148,7 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd)
     list_del(&resampler->link);
     kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
     kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-            resampler->notifier.gsi, 0);
+            resampler->notifier.gsi, 0, false);
     kfree(resampler);
 }
 
@@ -225,7 +227,8 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
         irq = rcu_dereference(irqfd->irq_entry);
         /* An event has been signaled, inject an interrupt */
         if (irq)
-            kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+            kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+                    false);
         else
             schedule_work(&irqfd->inject);
         rcu_read_unlock();
@@ -574,6 +577,7 @@ struct _ioeventfd {
     struct eventfd_ctx  *eventfd;
     u64                  datamatch;
     struct kvm_io_device dev;
+    u8                   bus_idx;
     bool                 wildcard;
 };
 
@@ -666,7 +670,8 @@ ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
     struct _ioeventfd *_p;
 
     list_for_each_entry(_p, &kvm->ioeventfds, list)
-        if (_p->addr == p->addr && _p->length == p->length &&
+        if (_p->bus_idx == p->bus_idx &&
+            _p->addr == p->addr && _p->length == p->length &&
             (_p->wildcard || p->wildcard ||
              _p->datamatch == p->datamatch))
             return true;
@@ -723,6 +728,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
     INIT_LIST_HEAD(&p->list);
     p->addr    = args->addr;
+    p->bus_idx = bus_idx;
     p->length  = args->len;
     p->eventfd = eventfd;
 
@@ -781,7 +787,8 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
     list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
         bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
 
-        if (p->eventfd != eventfd  ||
+        if (p->bus_idx != bus_idx ||
+            p->eventfd != eventfd  ||
             p->addr != args->addr  ||
             p->length != args->len ||
             p->wildcard != wildcard)
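
With bus_idx recorded in each _ioeventfd, the collision and deassign checks are now scoped per bus, so the same numeric address can carry independent ioeventfds on different buses. A hedged userspace illustration (the address and helper are hypothetical; the ioctl, struct, and flag names are from the published KVM API):

#include <linux/kvm.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

void register_on_two_buses(int vm_fd)       /* vm_fd: a KVM VM fd (assumed) */
{
    int efd = eventfd(0, 0);
    struct kvm_ioeventfd mmio = {
        .addr  = 0x3f8,                     /* hypothetical MMIO address */
        .len   = 4,
        .fd    = efd,
        .flags = 0,                         /* lands on KVM_MMIO_BUS */
    };
    struct kvm_ioeventfd pio = {
        .addr  = 0x3f8,                     /* same number, but a PIO port */
        .len   = 4,
        .fd    = efd,
        .flags = KVM_IOEVENTFD_FLAG_PIO,    /* lands on KVM_PIO_BUS */
    };

    ioctl(vm_fd, KVM_IOEVENTFD, &mmio);
    ioctl(vm_fd, KVM_IOEVENTFD, &pio);  /* no longer rejected as a collision */
}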
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 5ba005c00e2f..2d682977ce82 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -50,7 +50,8 @@
 #else
 #define ioapic_debug(fmt, arg...)
 #endif
-static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
+static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq,
+        bool line_status);
 
 static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
         unsigned long addr,
@@ -90,7 +91,80 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
     return result;
 }
 
-static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
+static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
+{
+    ioapic->rtc_status.pending_eoi = 0;
+    bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
+}
+
+static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
+{
+    bool new_val, old_val;
+    struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+    union kvm_ioapic_redirect_entry *e;
+
+    e = &ioapic->redirtbl[RTC_GSI];
+    if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
+            e->fields.dest_mode))
+        return;
+
+    new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
+    old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+
+    if (new_val == old_val)
+        return;
+
+    if (new_val) {
+        __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+        ioapic->rtc_status.pending_eoi++;
+    } else {
+        __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+        ioapic->rtc_status.pending_eoi--;
+    }
+
+    WARN_ON(ioapic->rtc_status.pending_eoi < 0);
+}
+
+void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
+{
+    struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+
+    spin_lock(&ioapic->lock);
+    __rtc_irq_eoi_tracking_restore_one(vcpu);
+    spin_unlock(&ioapic->lock);
+}
+
+static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
+{
+    struct kvm_vcpu *vcpu;
+    int i;
+
+    if (RTC_GSI >= IOAPIC_NUM_PINS)
+        return;
+
+    rtc_irq_eoi_tracking_reset(ioapic);
+    kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
+        __rtc_irq_eoi_tracking_restore_one(vcpu);
+}
+
+static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
+{
+    if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
+        --ioapic->rtc_status.pending_eoi;
+
+    WARN_ON(ioapic->rtc_status.pending_eoi < 0);
+}
+
+static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
+{
+    if (ioapic->rtc_status.pending_eoi > 0)
+        return true; /* coalesced */
+
+    return false;
+}
+
+static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx,
+        bool line_status)
 {
     union kvm_ioapic_redirect_entry *pent;
     int injected = -1;
@@ -98,7 +172,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
     pent = &ioapic->redirtbl[idx];
 
     if (!pent->fields.mask) {
-        injected = ioapic_deliver(ioapic, idx);
+        injected = ioapic_deliver(ioapic, idx, line_status);
         if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
             pent->fields.remote_irr = 1;
     }
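
Taken together, the new helpers implement a small state machine around the RTC pin; a summary of the life cycle (our paraphrase, not patch text):

/*
 * pending_eoi/dest_map life cycle, as implemented above and in the
 * ioapic_deliver() change further down:
 *
 *   RTC irq injected  -> ioapic_deliver() records how many vCPUs the
 *                        vector reached in pending_eoi and marks each
 *                        of them in dest_map
 *   a vCPU EOIs       -> rtc_irq_eoi() clears that vCPU's dest_map bit
 *                        and decrements pending_eoi
 *   next assertion    -> rtc_irq_check_coalesced() reports coalesced
 *                        while pending_eoi is still above zero
 */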
@@ -119,41 +193,48 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic)
     smp_wmb();
 }
 
-void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
-        u64 *eoi_exit_bitmap)
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
+        u32 *tmr)
 {
     struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
     union kvm_ioapic_redirect_entry *e;
-    struct kvm_lapic_irq irqe;
     int index;
 
     spin_lock(&ioapic->lock);
-    /* traverse ioapic entry to set eoi exit bitmap*/
     for (index = 0; index < IOAPIC_NUM_PINS; index++) {
         e = &ioapic->redirtbl[index];
         if (!e->fields.mask &&
             (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
              kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
-                 index))) {
-            irqe.dest_id = e->fields.dest_id;
-            irqe.vector = e->fields.vector;
-            irqe.dest_mode = e->fields.dest_mode;
-            irqe.delivery_mode = e->fields.delivery_mode << 8;
-            kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap);
+                 index) || index == RTC_GSI)) {
+            if (kvm_apic_match_dest(vcpu, NULL, 0,
+                    e->fields.dest_id, e->fields.dest_mode)) {
+                __set_bit(e->fields.vector,
+                        (unsigned long *)eoi_exit_bitmap);
+                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG)
+                    __set_bit(e->fields.vector,
+                            (unsigned long *)tmr);
+            }
         }
     }
     spin_unlock(&ioapic->lock);
 }
-EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap);
 
-void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm)
+#ifdef CONFIG_X86
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
 {
     struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
-    if (!kvm_apic_vid_enabled(kvm) || !ioapic)
+    if (!ioapic)
         return;
-    kvm_make_update_eoibitmap_request(kvm);
+    kvm_make_scan_ioapic_request(kvm);
 }
+#else
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+{
+    return;
+}
+#endif
 
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
@@ -195,16 +276,17 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
         kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
         if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
             && ioapic->irr & (1 << index))
-            ioapic_service(ioapic, index);
-        kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
+            ioapic_service(ioapic, index, false);
+        kvm_vcpu_request_scan_ioapic(ioapic->kvm);
         break;
     }
 }
 
-static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
 {
     union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
     struct kvm_lapic_irq irqe;
+    int ret;
 
     ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
             "vector=%x trig_mode=%x\n",
@@ -220,11 +302,19 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
     irqe.level = 1;
     irqe.shorthand = 0;
 
-    return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
+    if (irq == RTC_GSI && line_status) {
+        BUG_ON(ioapic->rtc_status.pending_eoi != 0);
+        ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
+                ioapic->rtc_status.dest_map);
+        ioapic->rtc_status.pending_eoi = ret;
+    } else
+        ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
+
+    return ret;
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-        int level)
+        int level, bool line_status)
 {
     u32 old_irr;
     u32 mask = 1 << irq;
@@ -244,13 +334,20 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
         ret = 1;
     } else {
         int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+
+        if (irq == RTC_GSI && line_status &&
+            rtc_irq_check_coalesced(ioapic)) {
+            ret = 0; /* coalesced */
+            goto out;
+        }
         ioapic->irr |= mask;
         if ((edge && old_irr != ioapic->irr) ||
             (!edge && !entry.fields.remote_irr))
-            ret = ioapic_service(ioapic, irq);
+            ret = ioapic_service(ioapic, irq, line_status);
         else
             ret = 0; /* report coalesced interrupt */
     }
+out:
     trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
     spin_unlock(&ioapic->lock);
 
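
Note that the new early-out path returns before ioapic->irr is updated: a coalesced RTC interrupt is dropped outright, on the expectation that the caller (ultimately the userspace RTC model, via the status that kvm_set_irq() propagates back) re-injects it once the guest has EOI'd. In both branches the trace point records ret == 0 as a coalesced delivery.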
@@ -267,8 +364,8 @@ void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
     spin_unlock(&ioapic->lock);
 }
 
-static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
-        int trigger_mode)
+static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
+        struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
     int i;
 
@@ -278,6 +375,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
         if (ent->fields.vector != vector)
             continue;
 
+        if (i == RTC_GSI)
+            rtc_irq_eoi(ioapic, vcpu);
         /*
          * We are dropping lock while calling ack notifiers because ack
          * notifier callbacks for assigned devices call into IOAPIC
@@ -296,7 +395,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
         ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
         ent->fields.remote_irr = 0;
         if (!ent->fields.mask && (ioapic->irr & (1 << i)))
-            ioapic_service(ioapic, i);
+            ioapic_service(ioapic, i, false);
     }
 }
 
@@ -307,12 +406,12 @@ bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
     return test_bit(vector, ioapic->handled_vectors);
 }
 
-void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
+void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
 {
-    struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+    struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
 
     spin_lock(&ioapic->lock);
-    __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
+    __kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
     spin_unlock(&ioapic->lock);
 }
 
@@ -410,7 +509,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
         break;
 #ifdef CONFIG_IA64
     case IOAPIC_REG_EOI:
-        __kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG);
+        __kvm_ioapic_update_eoi(NULL, ioapic, data, IOAPIC_LEVEL_TRIG);
         break;
 #endif
 
@@ -431,6 +530,7 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
     ioapic->ioregsel = 0;
     ioapic->irr = 0;
     ioapic->id = 0;
+    rtc_irq_eoi_tracking_reset(ioapic);
     update_handled_vectors(ioapic);
 }
 
@@ -496,7 +596,8 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
     spin_lock(&ioapic->lock);
     memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
     update_handled_vectors(ioapic);
-    kvm_ioapic_make_eoibitmap_request(kvm);
+    kvm_vcpu_request_scan_ioapic(kvm);
+    kvm_rtc_eoi_tracking_restore_all(ioapic);
     spin_unlock(&ioapic->lock);
     return 0;
 }
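
The scan request raised here is consumed on the arch side, which is not part of this diff. Under the assumption that the companion x86 patches of this series take their usual shape, the consumer looks roughly like the following sketch (function and hook names may differ):

/* rough sketch of the x86-side consumer; NOT part of this diff */
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
    u64 eoi_exit_bitmap[4];     /* one bit per vector, 256 total */
    u32 tmr[8];

    if (!kvm_apic_hw_enabled(vcpu->arch.apic))
        return;

    memset(eoi_exit_bitmap, 0, sizeof(eoi_exit_bitmap));
    memset(tmr, 0, sizeof(tmr));

    kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr);
    kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
    kvm_apic_update_tmr(vcpu, tmr);
}

Note also that kvm_set_ioapic() now calls kvm_rtc_eoi_tracking_restore_all(), which recomputes pending_eoi and dest_map from each vCPU's live APIC state, so the coalescing bookkeeping survives save/restore and migration.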
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 0400a466c50c..615d8c995c3c 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -34,6 +34,17 @@ struct kvm_vcpu;
 #define IOAPIC_INIT 0x5
 #define IOAPIC_EXTINT 0x7
 
+#ifdef CONFIG_X86
+#define RTC_GSI 8
+#else
+#define RTC_GSI -1U
+#endif
+
+struct rtc_status {
+    int pending_eoi;
+    DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
+};
+
 struct kvm_ioapic {
     u64 base_address;
     u32 ioregsel;
@@ -47,6 +58,7 @@ struct kvm_ioapic {
     void (*ack_notifier)(void *opaque, int irq);
     spinlock_t lock;
     DECLARE_BITMAP(handled_vectors, 256);
+    struct rtc_status rtc_status;
 };
 
 #ifdef DEBUG
@@ -67,24 +79,25 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
     return kvm->arch.vioapic;
 }
 
+void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
 int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
         int short_hand, int dest, int dest_mode);
 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
-void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
+void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
+        int trigger_mode);
 bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
 int kvm_ioapic_init(struct kvm *kvm);
 void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-        int level);
+        int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-        struct kvm_lapic_irq *irq);
+        struct kvm_lapic_irq *irq, unsigned long *dest_map);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm);
-void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
-        u64 *eoi_exit_bitmap);
-
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
+        u32 *tmr);
 
 #endif
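
On non-x86 builds RTC_GSI is -1U, so the RTC_GSI >= IOAPIC_NUM_PINS guard in kvm_rtc_eoi_tracking_restore_all() always takes the early return, and the irq == RTC_GSI comparisons can never match a valid pin; the RTC paths are dead code there. For sizing, the new member expands as follows (paraphrase of the generic DECLARE_BITMAP macro):

/* DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS) is equivalent to: */
unsigned long dest_map[BITS_TO_LONGS(KVM_MAX_VCPUS)];
/* one bit per vCPU targeted by the in-flight RTC interrupt;
 * pending_eoi counts how many of those bits are still set */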
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index e9073cf4d040..25ab48007adb 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -35,7 +35,8 @@
 #include "ioapic.h"
 
 static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
-        struct kvm *kvm, int irq_source_id, int level)
+        struct kvm *kvm, int irq_source_id, int level,
+        bool line_status)
 {
 #ifdef CONFIG_X86
     struct kvm_pic *pic = pic_irqchip(kvm);
@@ -46,10 +47,12 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
 }
 
 static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
-        struct kvm *kvm, int irq_source_id, int level)
+        struct kvm *kvm, int irq_source_id, int level,
+        bool line_status)
 {
     struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-    return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level);
+    return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
+            line_status);
 }
 
 inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
@@ -63,7 +66,7 @@ inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
 }
 
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-        struct kvm_lapic_irq *irq)
+        struct kvm_lapic_irq *irq, unsigned long *dest_map)
 {
     int i, r = -1;
     struct kvm_vcpu *vcpu, *lowest = NULL;
@@ -74,7 +77,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
         irq->delivery_mode = APIC_DM_FIXED;
     }
 
-    if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r))
+    if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
         return r;
 
     kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -88,7 +91,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
         if (!kvm_is_dm_lowest_prio(irq)) {
             if (r < 0)
                 r = 0;
-            r += kvm_apic_set_irq(vcpu, irq);
+            r += kvm_apic_set_irq(vcpu, irq, dest_map);
         } else if (kvm_lapic_enabled(vcpu)) {
             if (!lowest)
                 lowest = vcpu;
@@ -98,7 +101,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
     }
 
     if (lowest)
-        r = kvm_apic_set_irq(lowest, irq);
+        r = kvm_apic_set_irq(lowest, irq, dest_map);
 
     return r;
 }
@@ -121,7 +124,7 @@ static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
 }
 
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
-        struct kvm *kvm, int irq_source_id, int level)
+        struct kvm *kvm, int irq_source_id, int level, bool line_status)
 {
     struct kvm_lapic_irq irq;
 
@@ -130,7 +133,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 
     kvm_set_msi_irq(e, &irq);
 
-    return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
+    return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
 }
 
 
@@ -142,7 +145,7 @@ static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
 
     kvm_set_msi_irq(e, &irq);
 
-    if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r))
+    if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
         return r;
     else
         return -EWOULDBLOCK;
@@ -159,7 +162,7 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
     route.msi.address_hi = msi->address_hi;
     route.msi.data = msi->data;
 
-    return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+    return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
 }
 
 /*
@@ -168,7 +171,8 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
  * = 0   Interrupt was coalesced (previous irq is still pending)
  * > 0   Number of CPUs interrupt was delivered to
  */
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+        bool line_status)
 {
     struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
     int ret = -1, i = 0;
@@ -189,7 +193,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 
     while(i--) {
         int r;
-        r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
+        r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
+                line_status);
         if (r < 0)
             continue;
 
@@ -280,7 +285,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
     mutex_lock(&kvm->irq_lock);
     hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
     mutex_unlock(&kvm->irq_lock);
-    kvm_ioapic_make_eoibitmap_request(kvm);
+    kvm_vcpu_request_scan_ioapic(kvm);
 }
 
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -290,7 +295,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
     hlist_del_init_rcu(&kian->link);
     mutex_unlock(&kvm->irq_lock);
     synchronize_rcu();
-    kvm_ioapic_make_eoibitmap_request(kvm);
+    kvm_vcpu_request_scan_ioapic(kvm);
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
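
All three irqchip setters now share one signature, which is what lets kvm_set_irq() pass line_status through irq_set[i].set() without caring which chip it routes to; only the IOAPIC path consumes the flag so far. For orientation, a paraphrase of the callback type from include/linux/kvm_host.h of this era (abbreviated; not part of this diff):

struct kvm_kernel_irq_routing_entry {
    u32 gsi;
    u32 type;
    int (*set)(struct kvm_kernel_irq_routing_entry *e,
            struct kvm *kvm, int irq_source_id, int level,
            bool line_status);
    /* ... irqchip/msi payload and hash-list linkage omitted ... */
};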
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ff7154188b5f..aaac1a7a9ea8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -217,9 +217,9 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
     make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
-void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+void kvm_make_scan_ioapic_request(struct kvm *kvm)
 {
-    make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+    make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
@@ -1078,7 +1078,7 @@ static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
     return __copy_from_user_inatomic(data, hva, len);
 }
 
-int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
+static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
         unsigned long start, int write, struct page **page)
 {
     int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
@@ -1671,6 +1671,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
         smp_send_reschedule(cpu);
     put_cpu();
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
 void kvm_resched(struct kvm_vcpu *vcpu)
@@ -2258,7 +2259,8 @@ static long kvm_vm_ioctl(struct file *filp,
         if (copy_from_user(&irq_event, argp, sizeof irq_event))
             goto out;
 
-        r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
+        r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
+                ioctl == KVM_IRQ_LINE_STATUS);
         if (r)
             goto out;
 
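
KVM_IRQ_LINE and KVM_IRQ_LINE_STATUS now share this handler; the comparison result tells the irqchip code whether the caller wants coalescing feedback, and that is what ultimately becomes line_status down in kvm_set_irq(). A hedged sketch of the userspace side (vm_fd is an assumed KVM VM descriptor; struct kvm_irq_level and the return convention come from the published KVM API, where status overlays irq in a union and is filled in on return):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* returns 1 if the interrupt was coalesced, 0 otherwise, -1 on error */
int assert_rtc_irq(int vm_fd)
{
    struct kvm_irq_level irq_level = {
        .irq   = 8,     /* RTC_GSI on x86 */
        .level = 1,
    };

    if (ioctl(vm_fd, KVM_IRQ_LINE_STATUS, &irq_level) < 0)
        return -1;

    /* on return: < 0 ignored, == 0 coalesced, > 0 number of CPUs hit */
    return irq_level.status == 0;
}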
@@ -2572,14 +2574,6 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
     return NOTIFY_OK;
 }
 
-
-asmlinkage void kvm_spurious_fault(void)
-{
-    /* Fault while not rebooting. We want the trace. */
-    BUG();
-}
-EXPORT_SYMBOL_GPL(kvm_spurious_fault);
-
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
         void *v)
 {
@@ -2612,7 +2606,7 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
     kfree(bus);
 }
 
-int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
+static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 {
     const struct kvm_io_range *r1 = p1;
     const struct kvm_io_range *r2 = p2;
@@ -2624,7 +2618,7 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
     return 0;
 }
 
-int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
+static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
         gpa_t addr, int len)
 {
     bus->range[bus->dev_count++] = (struct kvm_io_range) {
@@ -2639,7 +2633,7 @@ int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
     return 0;
 }
 
-int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
+static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
         gpa_t addr, int len)
 {
     struct kvm_io_range *range, key;
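
The remaining kvm_main.c hunks are cleanup riding along with the series: helpers with no external callers (get_user_page_nowait() and the kvm_io_bus_* trio) become static, kvm_vcpu_kick() is exported for use by arch code, and kvm_spurious_fault() disappears from generic code, presumably relocated into arch-specific files by a companion patch; its destination is outside this diff.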