author     Kai Huang <kai.huang@linux.intel.com>    2015-01-27 21:54:26 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>      2015-01-29 09:31:37 -0500
commit     1c91cad42366ce0799ca17e7ad6995418741d012
tree       b5b6d96d85fbec4f2678470f7439c24e5d05eba6
parent     9b51a63024bd759f97a12f50907b8af23b065b36
KVM: x86: Change parameter of kvm_mmu_slot_remove_write_access
This patch changes the second parameter of kvm_mmu_slot_remove_write_access from
a slot id to a 'struct kvm_memory_slot *', to align with the kvm_x86_ops dirty
logging hooks that will be introduced in a later patch.

A better approach would be to change the second parameter of
kvm_arch_commit_memory_region from 'struct kvm_userspace_memory_region *' to
'struct kvm_memory_slot *new', but that would require changes to the other,
non-x86 architectures as well, so avoid it for now.
Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
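For illustration only (the helper below is hypothetical and not part of this
patch): a caller that still holds only a slot id can reach the new interface by
performing the id_to_memslot() lookup itself, which is exactly the step this
patch moves out of kvm_mmu_slot_remove_write_access():

/*
 * Hypothetical helper, shown only to illustrate the new calling
 * convention; it is not added by this patch.
 */
static void kvm_mmu_slot_remove_write_access_by_id(struct kvm *kvm, int slot)
{
	struct kvm_memory_slot *memslot;

	/* Resolve the slot id once in the caller... */
	memslot = id_to_memslot(kvm->memslots, slot);

	/* ...then pass the memslot pointer, matching the new prototype. */
	kvm_mmu_slot_remove_write_access(kvm, memslot);
}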
 arch/x86/include/asm/kvm_host.h |  3 ++-
 arch/x86/kvm/mmu.c              |  5 ++---
 arch/x86/kvm/x86.c              | 10 +++++++---
 3 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4f6369b6f7d2..67a98d793bf2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -834,7 +834,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot);
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot);
 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b7192236dcba..ec62ecd2d513 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4408,14 +4408,13 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 	init_kvm_mmu(vcpu);
 }
 
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot)
 {
-	struct kvm_memory_slot *memslot;
 	gfn_t last_gfn;
 	int i;
 	bool flush = false;
 
-	memslot = id_to_memslot(kvm->memslots, slot);
 	last_gfn = memslot->base_gfn + memslot->npages - 1;
 
 	spin_lock(&kvm->mmu_lock);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1e10e3f7f516..3a7fcff81af6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7538,7 +7538,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				const struct kvm_memory_slot *old,
 				enum kvm_mr_change change)
 {
-
+	struct kvm_memory_slot *new;
 	int nr_mmu_pages = 0;
 
 	if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
@@ -7557,6 +7557,10 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
 	if (nr_mmu_pages)
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+
+	/* It's OK to get 'new' slot here as it has already been installed */
+	new = id_to_memslot(kvm->memslots, mem->slot);
+
 	/*
 	 * Write protect all pages for dirty logging.
 	 *
@@ -7566,8 +7570,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	 *
 	 * See the comments in fast_page_fault().
 	 */
-	if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
-		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	if ((change != KVM_MR_DELETE) && (new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+		kvm_mmu_slot_remove_write_access(kvm, new);
 }
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)