aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/kvm_host.h
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>2012-08-20 23:02:51 -0400
committerAvi Kivity <avi@redhat.com>2012-08-22 08:09:03 -0400
commit4d8b81abc47b83a1939e59df2fdb0e98dfe0eedd (patch)
tree47ea77cb4f77a2884a0478ecf84f62e32e5661ee /include/linux/kvm_host.h
parent7068d0971524dd47a38f44f6020ba133432871ca (diff)
KVM: introduce readonly memslot
In current code, if we map a readonly memory space from host to guest and the page is not currently mapped in the host, we will get a fault pfn and async is not allowed; then the VM will crash. We introduce a readonly memory region to map ROM/ROMD into the guest: read access is happy for a readonly memslot, while write access on a readonly memslot will cause a KVM_EXIT_MMIO exit. Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r-- include/linux/kvm_host.h | 7
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a913ac709a9d..5972c9845ddb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -465,6 +465,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
@@ -792,12 +793,6 @@ hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
 	return slot->base_gfn + gfn_offset;
 }
 
-static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
-					       gfn_t gfn)
-{
-	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
-}
-
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
 {
 	return (gpa_t)gfn << PAGE_SHIFT;