author     Avi Kivity <avi@qumranet.com>              2007-01-05 19:36:54 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>   2007-01-06 02:55:27 -0500
commit     e2dec939db126989808853d218e426daaeebc9e2 (patch)
tree       5c742e609e43090df396fc1c7a6b4c526099dbea /drivers/kvm
parent     714b93da1a6d97307dfafb9915517879d8a66c0d (diff)
[PATCH] KVM: MMU: Detect oom conditions and propagate error to userspace
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/kvm')
-rw-r--r--  drivers/kvm/mmu.c          | 32
-rw-r--r--  drivers/kvm/paging_tmpl.h  |  8
-rw-r--r--  drivers/kvm/svm.c          | 14
-rw-r--r--  drivers/kvm/vmx.c          | 15
4 files changed, 50 insertions, 19 deletions
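The shape of the change, from BUG()-on-allocation-failure to an -ENOMEM return threaded through the callers, is sketched below in stand-alone form. This is an illustration only, not the kernel code: struct memory_cache, topup_memory_cache() and handle_fault() are stand-ins for the kvm_mmu_memory_cache helpers touched by the diff, and calloc() stands in for kzalloc().

/*
 * Illustrative sketch of the error-propagation pattern this patch
 * introduces. Stand-alone userspace C; all names are stand-ins for
 * the kernel helpers changed in the diff below.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_OBJS 4

struct memory_cache {
	int nobjs;
	void *objects[CACHE_OBJS];
};

/* Pre-patch, the equivalent kernel path called BUG() when the
 * allocation failed; now the failure is reported to the caller. */
static int topup_memory_cache(struct memory_cache *cache,
			      size_t objsize, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < CACHE_OBJS) {
		obj = calloc(1, objsize);
		if (!obj)
			return -ENOMEM;	/* propagate instead of crashing */
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

/* Callers check the return value and pass any error further up,
 * the same shape as mmu_topup_memory_caches() in the patch. */
static int handle_fault(struct memory_cache *cache)
{
	int r;

	r = topup_memory_cache(cache, 64, 1);
	if (r)
		return r;	/* eventually reaches the vcpu run loop */
	/* ... a real handler would now fix up the shadow page tables ... */
	return 0;
}

int main(void)
{
	struct memory_cache cache = { 0 };
	int r = handle_fault(&cache);

	while (cache.nobjs > 0)
		free(cache.objects[--cache.nobjs]);
	printf("handle_fault returned %d\n", r);
	return r ? 1 : 0;
}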
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index e96362aa7947..7761089ef3bc 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -166,19 +166,20 @@ static int is_rmap_pte(u64 pte)
 		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
 }
 
-static void mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				   size_t objsize, int min)
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+				  size_t objsize, int min)
 {
 	void *obj;
 
 	if (cache->nobjs >= min)
-		return;
+		return 0;
 	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
 		obj = kzalloc(objsize, GFP_NOWAIT);
 		if (!obj)
-			BUG();
+			return -ENOMEM;
 		cache->objects[cache->nobjs++] = obj;
 	}
+	return 0;
 }
 
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
@@ -187,12 +188,18 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 		kfree(mc->objects[--mc->nobjs]);
 }
 
-static void mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
-			       sizeof(struct kvm_pte_chain), 4);
-	mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
-			       sizeof(struct kvm_rmap_desc), 1);
+	int r;
+
+	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+				   sizeof(struct kvm_pte_chain), 4);
+	if (r)
+		goto out;
+	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+				   sizeof(struct kvm_rmap_desc), 1);
+out:
+	return r;
 }
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
@@ -824,8 +831,11 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 {
 	gpa_t addr = gva;
 	hpa_t paddr;
+	int r;
 
-	mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu);
+	if (r)
+		return r;
 
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
@@ -1052,7 +1062,7 @@ int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 	r = init_kvm_mmu(vcpu);
 	if (r < 0)
 		goto out;
-	mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu);
 out:
 	return r;
 }
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 4e6670ff1847..32b385188454 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -339,7 +339,8 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
  * - normal guest page fault due to the guest pte marked not present, not
  *   writable, or not executable
  *
- * Returns: 1 if we need to emulate the instruction, 0 otherwise
+ * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
+ *          a negative value on error.
  */
 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 			     u32 error_code)
@@ -351,10 +352,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	u64 *shadow_pte;
 	int fixed;
 	int write_pt = 0;
+	int r;
 
 	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
 
-	mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu);
+	if (r)
+		return r;
 
 	/*
 	 * Look up the shadow pte for the faulting address.
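The updated comment above documents a three-way contract for the page-fault path: a negative value on error, 0 when the shadow MMU resolved the fault, and 1 when the instruction has to be emulated. A stand-alone sketch of a caller consuming that contract follows; it is illustrative only, and resolve_fault() and handle_exit_sketch() are hypothetical stand-ins, not the kvm_mmu_page_fault() callers changed in svm.c and vmx.c below.

/* Illustration of the <0 / 0 / 1 return contract documented in the
 * comment above. Hypothetical stand-ins; plain userspace C. */
#include <errno.h>
#include <stdio.h>

/* Stand-in fault handler: <0 on error, 0 if the fault was fixed,
 * 1 if the faulting instruction must be emulated. */
static int resolve_fault(int simulate_oom)
{
	if (simulate_oom)
		return -ENOMEM;
	return 0;	/* pretend the shadow pte was fixed up */
}

/* Stand-in exit handler: returns <= 0 to stop running, > 0 to continue. */
static int handle_exit_sketch(int simulate_oom)
{
	int r = resolve_fault(simulate_oom);

	if (r < 0)
		return r;	/* error: unwind to the run loop */
	if (r == 0)
		return 1;	/* fault resolved: resume the guest */
	printf("emulating the faulting instruction\n");
	return 1;
}

int main(void)
{
	printf("normal path: %d\n", handle_exit_sketch(0));
	printf("oom path:    %d\n", handle_exit_sketch(1));
	return 0;
}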
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 99250011a471..af1e7b3f9171 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -852,6 +852,7 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u64 fault_address;
 	u32 error_code;
 	enum emulation_result er;
+	int r;
 
 	if (is_external_interrupt(exit_int_info))
 		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
@@ -860,7 +861,12 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	fault_address = vcpu->svm->vmcb->control.exit_info_2;
 	error_code = vcpu->svm->vmcb->control.exit_info_1;
-	if (!kvm_mmu_page_fault(vcpu, fault_address, error_code)) {
+	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
+	if (r < 0) {
+		spin_unlock(&vcpu->kvm->lock);
+		return r;
+	}
+	if (!r) {
 		spin_unlock(&vcpu->kvm->lock);
 		return 1;
 	}
@@ -1398,6 +1404,7 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u16 fs_selector;
 	u16 gs_selector;
 	u16 ldt_selector;
+	int r;
 
 again:
 	do_interrupt_requests(vcpu, kvm_run);
@@ -1565,7 +1572,8 @@ again:
 		return 0;
 	}
 
-	if (handle_exit(vcpu, kvm_run)) {
+	r = handle_exit(vcpu, kvm_run);
+	if (r > 0) {
 		if (signal_pending(current)) {
 			++kvm_stat.signal_exits;
 			post_kvm_run_save(vcpu, kvm_run);
@@ -1581,7 +1589,7 @@ again:
 		goto again;
 	}
 	post_kvm_run_save(vcpu, kvm_run);
-	return 0;
+	return r;
 }
 
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 59178ad4d344..ed3956739771 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1289,6 +1289,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	unsigned long cr2, rip;
 	u32 vect_info;
 	enum emulation_result er;
+	int r;
 
 	vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
@@ -1317,7 +1318,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 
 		spin_lock(&vcpu->kvm->lock);
-		if (!kvm_mmu_page_fault(vcpu, cr2, error_code)) {
+		r = kvm_mmu_page_fault(vcpu, cr2, error_code);
+		if (r < 0) {
+			spin_unlock(&vcpu->kvm->lock);
+			return r;
+		}
+		if (!r) {
 			spin_unlock(&vcpu->kvm->lock);
 			return 1;
 		}
@@ -1680,6 +1686,7 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u8 fail;
 	u16 fs_sel, gs_sel, ldt_sel;
 	int fs_gs_ldt_reload_needed;
+	int r;
 
 again:
 	/*
@@ -1853,6 +1860,7 @@ again:
 	if (fail) {
 		kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
 		kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR);
+		r = 0;
 	} else {
 		if (fs_gs_ldt_reload_needed) {
 			load_ldt(ldt_sel);
@@ -1872,7 +1880,8 @@ again:
 		}
 		vcpu->launched = 1;
 		kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
-		if (kvm_handle_exit(kvm_run, vcpu)) {
+		r = kvm_handle_exit(kvm_run, vcpu);
+		if (r > 0) {
 			/* Give scheduler a change to reschedule. */
 			if (signal_pending(current)) {
 				++kvm_stat.signal_exits;
@@ -1892,7 +1901,7 @@ again:
 	}
 
 	post_kvm_run_save(vcpu, kvm_run);
-	return 0;
+	return r;
 }
 
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
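With svm_vcpu_run() and vmx_vcpu_run() now returning the propagated value instead of a hard-coded 0, an allocation failure in the shadow MMU surfaces to userspace as a failing ioctl rather than a kernel BUG(). The sketch below is a hypothetical userspace-side check, written against today's per-vcpu KVM_RUN ioctl for illustration (the 2007-era ioctl interface differed in details); no guest memory or register state is configured, so the vcpu will not run a real guest, and run_vcpu() is not part of any real tool.

/* Hypothetical userspace check of the propagated error. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu(int vcpu_fd)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
		/* With this patch an OOM in the shadow MMU can surface
		 * here as errno == ENOMEM instead of crashing the host. */
		fprintf(stderr, "KVM_RUN failed: %s\n", strerror(errno));
		return -errno;
	}
	return 0;
}

int main(void)
{
	int kvm_fd, vm_fd, vcpu_fd, r;

	kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}
	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	if (vcpu_fd < 0) {
		perror("KVM_CREATE_VCPU");
		return 1;
	}
	/* No memory or registers are set up: the point is only where
	 * the propagated error would be observed. */
	r = run_vcpu(vcpu_fd);

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
	return r ? 1 : 0;
}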