author:    Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>  2012-07-25 23:58:59 -0400
committer: Avi Kivity <avi@redhat.com>  2012-07-26 04:55:34 -0400
commit:    a2766325cf9f9e36d1225145f1ce1b066f001837 (patch)
tree:      a00cefe5c199c7e4845294f38475c3abd90e9419 /virt/kvm
parent:    2b4b5af8f8e7296bc27c52023ab6bb8f53db3a2b (diff)
KVM: remove dummy pages
Currently, kvm allocates some pages and uses them as error indicators;
this wastes memory and is bad for scalability.

Based on Avi's suggestion, use error codes instead of these pages to
indicate the error conditions.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
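
[Editor's note] The patch leans on the kernel's convention that the top MAX_ERRNO values of the address space can never be a valid page pointer or pfn, so a small negative errno cast into the value is unambiguously an error. Below is a minimal user-space sketch (not kernel code) of the ERR_PTR()/IS_ERR()/IS_ERR_VALUE() helpers from include/linux/err.h, re-created here so it compiles standalone:

/*
 * Illustrative user-space sketch, not kernel code.  A negative errno
 * cast to a pointer (or stored in a pfn) lands in the top 4095 values
 * of the address space, which never hold a valid page or pfn -- so the
 * error code travels in the return value and no dummy page is needed.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline int IS_ERR(const void *ptr)
{
        return IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
        void *page = ERR_PTR(-ENOENT);  /* what get_bad_page() returns */
        unsigned long pfn = -EFAULT;    /* what get_fault_pfn() returns */

        printf("page is error: %d\n", IS_ERR(page));       /* prints 1 */
        printf("pfn  is error: %d\n", IS_ERR_VALUE(pfn));  /* prints 1 */
        return 0;
}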
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/async_pf.c |   3
-rw-r--r--  virt/kvm/kvm_main.c | 121
2 files changed, 50 insertions(+), 74 deletions(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index ebae24b62c90..79722782d9d7 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -203,8 +203,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 	if (!work)
 		return -ENOMEM;
 
-	work->page = bad_page;
-	get_page(bad_page);
+	work->page = get_bad_page();
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0014ee99dc7f..de89497fe4c7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -100,17 +100,11 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-struct page *bad_page;
-static pfn_t bad_pfn;
-
-static struct page *hwpoison_page;
-static pfn_t hwpoison_pfn;
-
-static struct page *fault_page;
-static pfn_t fault_pfn;
-
-inline int kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_mmio_pfn(pfn_t pfn)
 {
+	if (is_error_pfn(pfn))
+		return false;
+
 	if (pfn_valid(pfn)) {
 		int reserved;
 		struct page *tail = pfn_to_page(pfn);
@@ -939,34 +933,55 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages);
 
 int is_error_page(struct page *page)
 {
-	return page == bad_page || page == hwpoison_page || page == fault_page;
+	return IS_ERR(page);
 }
 EXPORT_SYMBOL_GPL(is_error_page);
 
 int is_error_pfn(pfn_t pfn)
 {
-	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
+	return IS_ERR_VALUE(pfn);
 }
 EXPORT_SYMBOL_GPL(is_error_pfn);
 
+static pfn_t get_bad_pfn(void)
+{
+	return -ENOENT;
+}
+
+pfn_t get_fault_pfn(void)
+{
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(get_fault_pfn);
+
+static pfn_t get_hwpoison_pfn(void)
+{
+	return -EHWPOISON;
+}
+
 int is_hwpoison_pfn(pfn_t pfn)
 {
-	return pfn == hwpoison_pfn;
+	return pfn == -EHWPOISON;
 }
 EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
 
 int is_noslot_pfn(pfn_t pfn)
 {
-	return pfn == bad_pfn;
+	return pfn == -ENOENT;
 }
 EXPORT_SYMBOL_GPL(is_noslot_pfn);
 
 int is_invalid_pfn(pfn_t pfn)
 {
-	return pfn == hwpoison_pfn || pfn == fault_pfn;
+	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
 }
 EXPORT_SYMBOL_GPL(is_invalid_pfn);
 
+struct page *get_bad_page(void)
+{
+	return ERR_PTR(-ENOENT);
+}
+
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;
@@ -1038,13 +1053,6 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-pfn_t get_fault_pfn(void)
-{
-	get_page(fault_page);
-	return fault_pfn;
-}
-EXPORT_SYMBOL_GPL(get_fault_pfn);
-
 int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
 			 unsigned long start, int write, struct page **page)
 {
@@ -1122,8 +1130,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	if (npages == -EHWPOISON ||
 	      (!async && check_user_page_hwpoison(addr))) {
 		up_read(&current->mm->mmap_sem);
-		get_page(hwpoison_page);
-		return page_to_pfn(hwpoison_page);
+		return get_hwpoison_pfn();
 	}
 
 	vma = find_vma_intersection(current->mm, addr, addr+1);
@@ -1161,10 +1168,8 @@ static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
 		*async = false;
 
 	addr = gfn_to_hva(kvm, gfn);
-	if (kvm_is_error_hva(addr)) {
-		get_page(bad_page);
-		return page_to_pfn(bad_page);
-	}
+	if (kvm_is_error_hva(addr))
+		return get_bad_pfn();
 
 	return hva_to_pfn(addr, atomic, async, write_fault, writable);
 }
@@ -1218,37 +1223,45 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 }
 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
+static struct page *kvm_pfn_to_page(pfn_t pfn)
+{
+	WARN_ON(kvm_is_mmio_pfn(pfn));
+
+	if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
+		return get_bad_page();
+
+	return pfn_to_page(pfn);
+}
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	pfn_t pfn;
 
 	pfn = gfn_to_pfn(kvm, gfn);
-	if (!kvm_is_mmio_pfn(pfn))
-		return pfn_to_page(pfn);
-
-	WARN_ON(kvm_is_mmio_pfn(pfn));
 
-	get_page(bad_page);
-	return bad_page;
+	return kvm_pfn_to_page(pfn);
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
 {
-	kvm_release_pfn_clean(page_to_pfn(page));
+	if (!is_error_page(page))
+		kvm_release_pfn_clean(page_to_pfn(page));
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
 
 void kvm_release_page_dirty(struct page *page)
 {
+	WARN_ON(is_error_page(page));
+
 	kvm_release_pfn_dirty(page_to_pfn(page));
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
@@ -2771,33 +2784,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	if (r)
 		goto out_fail;
 
-	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-
-	if (bad_page == NULL) {
-		r = -ENOMEM;
-		goto out;
-	}
-
-	bad_pfn = page_to_pfn(bad_page);
-
-	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-
-	if (hwpoison_page == NULL) {
-		r = -ENOMEM;
-		goto out_free_0;
-	}
-
-	hwpoison_pfn = page_to_pfn(hwpoison_page);
-
-	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-
-	if (fault_page == NULL) {
-		r = -ENOMEM;
-		goto out_free_0;
-	}
-
-	fault_pfn = page_to_pfn(fault_page);
-
 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
 		r = -ENOMEM;
 		goto out_free_0;
@@ -2872,12 +2858,6 @@ out_free_1:
 out_free_0a:
 	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
-	if (fault_page)
-		__free_page(fault_page);
-	if (hwpoison_page)
-		__free_page(hwpoison_page);
-	__free_page(bad_page);
-out:
 	kvm_arch_exit();
 out_fail:
 	return r;
@@ -2897,8 +2877,5 @@ void kvm_exit(void)
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
 	free_cpumask_var(cpus_hardware_enabled);
-	__free_page(fault_page);
-	__free_page(hwpoison_page);
-	__free_page(bad_page);
 }
 EXPORT_SYMBOL_GPL(kvm_exit);
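
[Editor's note] As a sanity check on the rewritten predicates, here is a user-space sketch (again illustrative, not kernel code; pfn_t is a local stand-in) confirming that the new error-code scheme partitions errors the way the removed dummy pages did: -ENOENT plays the role of bad_page ("no memslot"), while -EFAULT and -EHWPOISON take over for fault_page and hwpoison_page:

/* Illustrative user-space re-implementation of the new predicates. */
#include <assert.h>
#include <errno.h>
#include <stdio.h>

typedef unsigned long long pfn_t;       /* stand-in for the kernel's pfn_t */

#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((unsigned long long)(x) >= (unsigned long long)-MAX_ERRNO)

static int is_error_pfn(pfn_t pfn)   { return IS_ERR_VALUE(pfn); }
static int is_noslot_pfn(pfn_t pfn)  { return pfn == (pfn_t)-ENOENT; }
static int is_invalid_pfn(pfn_t pfn) { return !is_noslot_pfn(pfn) && is_error_pfn(pfn); }

int main(void)
{
        pfn_t bad = (pfn_t)-ENOENT, fault = (pfn_t)-EFAULT, ok = 0x1234;

        assert(is_error_pfn(bad) && is_error_pfn(fault));   /* both are errors */
        assert(is_noslot_pfn(bad) && !is_noslot_pfn(fault));
        assert(is_invalid_pfn(fault) && !is_invalid_pfn(bad));
        assert(!is_error_pfn(ok));                          /* real pfns pass through */
        puts("predicates match the old dummy-page semantics");
        return 0;
}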