author     Marcelo Tosatti <mtosatti@redhat.com>    2010-10-22 12:18:18 -0400
committer  Avi Kivity <avi@redhat.com>              2011-01-12 04:28:40 -0500
commit     612819c3c6e67bac8fceaa7cc402f13b1b63f7e4 (patch)
tree       3739b8420660fc4de8d37d26004d9992e92acbe3 /virt
parent     7905d9a5ad7a83f1c1c00559839857ab90afbdfc (diff)
KVM: propagate fault r/w information to gup(), allow read-only memory
As suggested by Andrea, pass r/w error code to gup(), upgrading read fault
to writable if host pte allows it.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
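
The interesting part of the change is the slow path of hva_to_pfn(): gup() is now called with the fault direction the guest actually requested, and a read fault is then probed for write access so the page can be mapped writable whenever the host pte already permits it. A minimal sketch of that logic, distilled from the hunk below (the atomic/async and error paths are omitted, and the helper name is illustrative, not part of this commit):

static int fault_in_page(unsigned long addr, bool write_fault,
			 bool *writable, struct page **page)
{
	int npages;

	/* fault the page in with the access the guest asked for */
	npages = get_user_pages_fast(addr, 1, write_fault, page);

	/* map read fault as writable if possible */
	if (npages == 1 && !write_fault) {
		struct page *wpage[1];

		/*
		 * __get_user_pages_fast() with write=1 succeeds only if
		 * the host pte is already present and writable, so this
		 * probes for write permission without forcing a fault
		 * (and hence without breaking COW).
		 */
		if (__get_user_pages_fast(addr, 1, 1, wpage) == 1) {
			*writable = true;
			put_page(page[0]);	/* drop the read-only reference */
			page[0] = wpage[0];	/* keep the writable one */
		}
	}
	return npages;
}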
Diffstat (limited to 'virt')
-rw-r--r--    virt/kvm/kvm_main.c    51
1 file changed, 41 insertions(+), 10 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 475a100f3a22..2803b4db2a38 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -959,7 +959,7 @@ static pfn_t get_fault_pfn(void)
 }
 
 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
-			bool *async)
+			bool *async, bool write_fault, bool *writable)
 {
 	struct page *page[1];
 	int npages = 0;
@@ -968,12 +968,34 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 	/* we can do it either atomically or asynchronously, not both */
 	BUG_ON(atomic && async);
 
+	BUG_ON(!write_fault && !writable);
+
+	if (writable)
+		*writable = true;
+
 	if (atomic || async)
 		npages = __get_user_pages_fast(addr, 1, 1, page);
 
 	if (unlikely(npages != 1) && !atomic) {
 		might_sleep();
-		npages = get_user_pages_fast(addr, 1, 1, page);
+
+		if (writable)
+			*writable = write_fault;
+
+		npages = get_user_pages_fast(addr, 1, write_fault, page);
+
+		/* map read fault as writable if possible */
+		if (unlikely(!write_fault) && npages == 1) {
+			struct page *wpage[1];
+
+			npages = __get_user_pages_fast(addr, 1, 1, wpage);
+			if (npages == 1) {
+				*writable = true;
+				put_page(page[0]);
+				page[0] = wpage[0];
+			}
+			npages = 1;
+		}
 	}
 
 	if (unlikely(npages != 1)) {
@@ -1011,11 +1033,12 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 
 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
 {
-	return hva_to_pfn(kvm, addr, true, NULL);
+	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
 
-static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async)
+static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
+			  bool write_fault, bool *writable)
 {
 	unsigned long addr;
 
@@ -1028,32 +1051,40 @@ static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async)
 		return page_to_pfn(bad_page);
 	}
 
-	return hva_to_pfn(kvm, addr, atomic, async);
+	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
 }
 
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, true, NULL);
+	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
 
-pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async)
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+		       bool write_fault, bool *writable)
 {
-	return __gfn_to_pfn(kvm, gfn, false, async);
+	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
 
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, false, NULL);
+	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+		      bool *writable)
+{
+	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
+
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
-	return hva_to_pfn(kvm, addr, false, NULL);
+	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
 }
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
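
For callers, the new gfn_to_pfn_prot() export makes the writability of the resulting mapping visible. A hypothetical fault-handler fragment (map_spte() is a placeholder, not part of this commit):

	bool writable;
	pfn_t pfn;

	/* a read fault: ask only for read access ... */
	pfn = gfn_to_pfn_prot(kvm, gfn, false, &writable);

	/*
	 * ... but if the host pte allowed write, 'writable' now reports
	 * that the spte may be created writable up front, avoiding a
	 * second fault when the guest later writes to the page.
	 */
	map_spte(kvm, gfn, pfn, writable);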