commit    8d0bc2b456103a34c11e01305cd1aed1cde579e5
tree      5e1e6ad55cc9e2b5c5617f6f320114b8cff9e3f3 /virt
parent    30ba3ead05763b172acaa65ae1be71af2a878940
parent    e40152ee1e1c7a63f4777791863215e3faa37a86
author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2010-05-19 13:12:41 -0400
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2010-05-19 13:12:41 -0400

    Merge commit 'v2.6.34' into next
Diffstat (limited to 'virt')

 -rw-r--r--  virt/kvm/ioapic.c   | 30 +++++++++++++++---------------
 -rw-r--r--  virt/kvm/ioapic.h   |  2 +-
 -rw-r--r--  virt/kvm/kvm_main.c | 17 ++++++++++++-----
 3 files changed, 28 insertions(+), 21 deletions(-)
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 03a5eb22da2b..7c79c1d76d0c 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -197,7 +197,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
         union kvm_ioapic_redirect_entry entry;
         int ret = 1;
 
-        mutex_lock(&ioapic->lock);
+        spin_lock(&ioapic->lock);
         if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
                 entry = ioapic->redirtbl[irq];
                 level ^= entry.fields.polarity;
@@ -214,7 +214,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
                 }
                 trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
         }
-        mutex_unlock(&ioapic->lock);
+        spin_unlock(&ioapic->lock);
 
         return ret;
 }
@@ -238,9 +238,9 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
                  * is dropped it will be put into irr and will be delivered
                  * after ack notifier returns.
                  */
-                mutex_unlock(&ioapic->lock);
+                spin_unlock(&ioapic->lock);
                 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
-                mutex_lock(&ioapic->lock);
+                spin_lock(&ioapic->lock);
 
                 if (trigger_mode != IOAPIC_LEVEL_TRIG)
                         continue;
@@ -259,9 +259,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
         smp_rmb();
         if (!test_bit(vector, ioapic->handled_vectors))
                 return;
-        mutex_lock(&ioapic->lock);
+        spin_lock(&ioapic->lock);
         __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
-        mutex_unlock(&ioapic->lock);
+        spin_unlock(&ioapic->lock);
 }
 
 static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
@@ -287,7 +287,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
         ASSERT(!(addr & 0xf));  /* check alignment */
 
         addr &= 0xff;
-        mutex_lock(&ioapic->lock);
+        spin_lock(&ioapic->lock);
         switch (addr) {
         case IOAPIC_REG_SELECT:
                 result = ioapic->ioregsel;
@@ -301,7 +301,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
                 result = 0;
                 break;
         }
-        mutex_unlock(&ioapic->lock);
+        spin_unlock(&ioapic->lock);
 
         switch (len) {
         case 8:
@@ -338,7 +338,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
         }
 
         addr &= 0xff;
-        mutex_lock(&ioapic->lock);
+        spin_lock(&ioapic->lock);
         switch (addr) {
         case IOAPIC_REG_SELECT:
                 ioapic->ioregsel = data;
@@ -356,7 +356,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
         default:
                 break;
         }
-        mutex_unlock(&ioapic->lock);
+        spin_unlock(&ioapic->lock);
         return 0;
 }
 
@@ -386,7 +386,7 @@ int kvm_ioapic_init(struct kvm *kvm)
         ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
         if (!ioapic)
                 return -ENOMEM;
-        mutex_init(&ioapic->lock);
+        spin_lock_init(&ioapic->lock);
         kvm->arch.vioapic = ioapic;
         kvm_ioapic_reset(ioapic);
         kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
@@ -419,9 +419,9 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
         if (!ioapic)
                 return -EINVAL;
 
-        mutex_lock(&ioapic->lock);
+        spin_lock(&ioapic->lock);
         memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
-        mutex_unlock(&ioapic->lock);
+        spin_unlock(&ioapic->lock);
         return 0;
 }
 
@@ -431,9 +431,9 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
         if (!ioapic)
                 return -EINVAL;
 
-        mutex_lock(&ioapic->lock);
+        spin_lock(&ioapic->lock);
         memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
         update_handled_vectors(ioapic);
-        mutex_unlock(&ioapic->lock);
+        spin_unlock(&ioapic->lock);
         return 0;
 }
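Note on the hunks above: every ioapic->lock critical section is short and non-blocking (redirection-table updates, register reads/writes, a memcpy of the state), which is what makes the mutex-to-spinlock conversion possible, presumably so the ioapic can also be poked from contexts that must not sleep. The one callback that can block or re-enter the ioapic, kvm_notify_acked_irq(), is already called with the lock dropped, as __kvm_ioapic_update_eoi() shows. A minimal standalone sketch of that drop-and-retake pattern, with hypothetical names (my_dev, my_dev_notify) standing in for the KVM structures:

	#include <linux/spinlock.h>

	/* Hypothetical device, used only to illustrate the pattern. */
	struct my_dev {
		spinlock_t lock;	/* assumed initialized with spin_lock_init() */
		int state[16];
	};

	/* Callback that may sleep or re-enter my_dev, so it must run unlocked. */
	void my_dev_notify(struct my_dev *d, int i);

	static void my_dev_scan(struct my_dev *d)
	{
		int i;

		spin_lock(&d->lock);
		for (i = 0; i < 16; i++) {
			if (!d->state[i])
				continue;
			/*
			 * Drop the spinlock around the callback, as
			 * __kvm_ioapic_update_eoi() does around
			 * kvm_notify_acked_irq(), then re-take it and go on.
			 */
			spin_unlock(&d->lock);
			my_dev_notify(d, i);
			spin_lock(&d->lock);
		}
		spin_unlock(&d->lock);
	}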
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 8a751b78a430..0b190c34ccc3 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -45,7 +45,7 @@ struct kvm_ioapic {
         struct kvm_io_device dev;
         struct kvm *kvm;
         void (*ack_notifier)(void *opaque, int irq);
-        struct mutex lock;
+        spinlock_t lock;
         DECLARE_BITMAP(handled_vectors, 256);
 };
 
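The field change in ioapic.h is what forces the matching init/lock/unlock changes in ioapic.c; the mapping between the two locking families is mechanical. A small reference sketch using only standard kernel primitives (the struct names are made up for illustration):

	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	struct before { struct mutex lock; };	/* may sleep while held */
	struct after  { spinlock_t   lock; };	/* must not sleep while held */

	static void init_both(struct before *b, struct after *a)
	{
		mutex_init(&b->lock);		/* old initializer */
		spin_lock_init(&a->lock);	/* new initializer */
	}

	static void touch_both(struct before *b, struct after *a)
	{
		mutex_lock(&b->lock);		/* old: sleeping lock */
		mutex_unlock(&b->lock);

		spin_lock(&a->lock);		/* new: busy-waiting lock */
		spin_unlock(&a->lock);
	}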
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a0cd194dce0..c82ae2492634 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
                                      struct mm_struct *mm)
 {
         struct kvm *kvm = mmu_notifier_to_kvm(mn);
+        int idx;
+
+        idx = srcu_read_lock(&kvm->srcu);
         kvm_arch_flush_shadow(kvm);
+        srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
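The hunk above brackets kvm_arch_flush_shadow() with an SRCU read-side critical section on kvm->srcu (which, as an assumption not spelled out in this diff, guards state such as the memslots that the flush walks). A minimal sketch of the SRCU read-side pattern itself, with a hypothetical my_obj standing in for struct kvm:

	#include <linux/srcu.h>

	/* Hypothetical object; kvm->srcu plays this role in the hunk above. */
	struct my_obj {
		struct srcu_struct srcu;	/* assumed set up via init_srcu_struct() */
	};

	static void my_obj_reader(struct my_obj *obj)
	{
		int idx;

		/*
		 * srcu_read_lock() returns an index that must be handed back
		 * to srcu_read_unlock(), exactly as kvm_mmu_notifier_release()
		 * now does with kvm->srcu.
		 */
		idx = srcu_read_lock(&obj->srcu);
		/* ... read SRCU-protected state here ... */
		srcu_read_unlock(&obj->srcu, idx);
	}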
@@ -648,7 +652,7 @@ skip_lpage:
 
         /* Allocate page dirty bitmap if needed */
         if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+                unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
                 new.dirty_bitmap = vmalloc(dirty_bytes);
                 if (!new.dirty_bitmap)
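This hunk, and the kvm_get_dirty_log() hunk below, replace the open-coded ALIGN(npages, BITS_PER_LONG) / 8 with kvm_dirty_bitmap_bytes(). The helper is defined outside this diff; inferred purely from the expression it replaces, it presumably reduces to something like:

	/*
	 * Sketch of the helper, inferred from the open-coded expression it
	 * replaces in these hunks; the real definition is not part of this diff.
	 */
	static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
	{
		/* one bit per page, rounded up to a whole number of longs */
		return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	}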
@@ -768,7 +772,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
         struct kvm_memory_slot *memslot;
         int r, i;
-        int n;
+        unsigned long n;
         unsigned long any = 0;
 
         r = -EINVAL;
@@ -780,7 +784,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
         if (!memslot->dirty_bitmap)
                 goto out;
 
-        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+        n = kvm_dirty_bitmap_bytes(memslot);
 
         for (i = 0; !any && i < n/sizeof(long); ++i)
                 any = memslot->dirty_bitmap[i];
@@ -1186,10 +1190,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
         memslot = gfn_to_memslot_unaliased(kvm, gfn);
         if (memslot && memslot->dirty_bitmap) {
                 unsigned long rel_gfn = gfn - memslot->base_gfn;
+                unsigned long *p = memslot->dirty_bitmap +
+                                        rel_gfn / BITS_PER_LONG;
+                int offset = rel_gfn % BITS_PER_LONG;
 
                 /* avoid RMW */
-                if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-                        generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+                if (!generic_test_le_bit(offset, p))
+                        generic___set_le_bit(offset, p);
         }
 }
 
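The mark_page_dirty() rework splits rel_gfn into a word pointer and an in-word bit offset before calling the generic little-endian bit helpers, so the bit number passed to them always stays below BITS_PER_LONG (presumably to keep very large slot offsets out of the int-typed helpers). A standalone user-space sketch of the same decomposition, using a plain native-endian bitmap as a stand-in for the generic *_le_bit() helpers:

	#include <stdio.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	int main(void)
	{
		/* Hypothetical example: page 300 of a memory slot got dirtied. */
		unsigned long dirty_bitmap[8] = { 0 };
		unsigned long rel_gfn = 300;

		/* Same decomposition as the new mark_page_dirty() code. */
		unsigned long *p = dirty_bitmap + rel_gfn / BITS_PER_LONG;
		unsigned int offset = rel_gfn % BITS_PER_LONG;

		/* avoid RMW: only write if the bit is still clear */
		if (!(*p & (1UL << offset)))
			*p |= 1UL << offset;

		/* With 64-bit longs this prints word 4, bit 44 (300 = 4*64 + 44). */
		printf("word %lu, bit %u\n",
		       (unsigned long)(rel_gfn / BITS_PER_LONG), offset);
		return 0;
	}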