author		Dave Airlie <airlied@redhat.com>	2010-05-18 19:35:51 -0400
committer	Dave Airlie <airlied@redhat.com>	2010-05-18 19:35:51 -0400
commit		05ea893c46805b2981ea8ba6df881e3d65edd63b (patch)
tree		ea381e22d99f49bd2c95238f88491d48b797a17b	/virt/kvm/kvm_main.c
parent		26481fb15644b5fd85d4cea020f74a234cdf6803 (diff)
parent		a7c542782e92f9487c62a571565637be3d6b0ffd (diff)
Merge remote branch 'anholt/drm-intel-next' into drm-next
* anholt/drm-intel-next: (515 commits)
  drm/i915: Fix out of tree builds
  drm/i915: move fence lru to struct drm_i915_fence_reg
  drm/i915: don't allow tiling changes on pinned buffers v2
  drm/i915: Be extra careful about A/D matching for multifunction SDVO
  drm/i915: Fix DDC bus selection for multifunction SDVO
  drm/i915: cleanup mode setting before unmapping registers
  drm/i915: Make fbc control wrapper functions
  drm/i915: Wait for the GPU whilst shrinking, if truly desperate.
  drm/i915: Use spatio-temporal dithering on PCH
  [MTD] Remove zero-length files mtdbdi.c and internal.ho
  pata_pcmcia / ide-cs: Fix bad hashes for Transcend and kingston IDs
  libata: Fix several inaccuracies in developer's guide
  slub: Fix bad boundary check in init_kmem_cache_nodes()
  raid6: fix recovery performance regression
  KEYS: call_sbin_request_key() must write lock keyrings before modifying them
  KEYS: Use RCU dereference wrappers in keyring key type code
  KEYS: find_keyring_by_name() can gain access to a freed keyring
  ALSA: hda: Fix 0 dB for Packard Bell models using Conexant CX20549 (Venice)
  ALSA: hda - Add quirk for Dell Inspiron 19T using a Conexant CX20582
  ALSA: take tu->qlock with irqs disabled
  ...
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	17
1 files changed, 12 insertions, 5 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a0cd194dce0..c82ae2492634 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 					     struct mm_struct *mm)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
 	kvm_arch_flush_shadow(kvm);
+	srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
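The hunk above wraps kvm_arch_flush_shadow() in an SRCU read-side critical section, so the memslot data it walks cannot be reclaimed by a concurrent synchronize_srcu() on kvm->srcu while the release notifier runs. A minimal sketch of that pattern follows; the wrapper function name is invented for illustration, while srcu_read_lock()/srcu_read_unlock() are the real <linux/srcu.h> primitives and must be paired on the same srcu_struct with the returned index.

	/* Sketch only: hypothetical wrapper around the pattern added above. */
	static void flush_shadow_under_srcu(struct kvm *kvm)
	{
		int idx;

		idx = srcu_read_lock(&kvm->srcu);	/* enter read side, remember the index */
		kvm_arch_flush_shadow(kvm);		/* memslots stay valid in this window */
		srcu_read_unlock(&kvm->srcu, idx);	/* leave read side with the same index */
	}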
@@ -648,7 +652,7 @@ skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
@@ -768,7 +772,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	int n;
+	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
@@ -780,7 +784,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
 		any = memslot->dirty_bitmap[i];
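The three hunks above replace the open-coded ALIGN(npages, BITS_PER_LONG) / 8 size computation with kvm_dirty_bitmap_bytes() and widen the byte count from int to unsigned long, so very large memory slots cannot overflow the size. The helper is not defined in this file; a sketch consistent with the expression it replaces would be:

	/* Hypothetical sketch of kvm_dirty_bitmap_bytes(): one bit per page,
	 * rounded up to whole longs, returned in bytes. The real helper is
	 * defined outside this file. */
	static unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
	{
		return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	}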
@@ -1186,10 +1190,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
+		unsigned long *p = memslot->dirty_bitmap +
+					rel_gfn / BITS_PER_LONG;
+		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		if (!generic_test_le_bit(offset, p))
+			generic___set_le_bit(offset, p);
 	}
 }
 
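The final hunk splits rel_gfn into the long word that holds the dirty bit and the bit's position within that word, then tests before setting so an already-dirty page does not cause a read-modify-write of the bitmap word. A standalone sketch of that addressing follows; the function name is invented for illustration, while generic_test_le_bit()/generic___set_le_bit() are the little-endian bitop helpers the hunk itself uses.

	/* Sketch only: hypothetical wrapper showing the word + offset split
	 * used in mark_page_dirty() above. */
	static void set_dirty_bit_once(unsigned long *dirty_bitmap, unsigned long rel_gfn)
	{
		unsigned long *p = dirty_bitmap + rel_gfn / BITS_PER_LONG;	/* word containing the bit */
		int offset = rel_gfn % BITS_PER_LONG;				/* bit index inside that word */

		/* write only when the bit is still clear, avoiding a needless RMW */
		if (!generic_test_le_bit(offset, p))
			generic___set_le_bit(offset, p);
	}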