author     Linus Torvalds <torvalds@linux-foundation.org>  2012-08-25 20:27:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-08-25 20:27:17 -0400
commit     6ec9776c28f9fe0780803a7dc1045c83323d8338
tree       957376c792451aadb00a1b1d68871cd30e3178c8 /arch
parent     e1d33a5c634bcf5f944034a2127c0026308c005c
parent     5ad105e569c45dcfad50d724c61d5061248be755
Merge git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Marcelo Tosatti.

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86 emulator: use stack size attribute to mask rsp in stack ops
  KVM: MMU: Fix mmu_shrink() so that it can free mmu pages as intended
  ppc: e500_tlb memset clears nothing
  KVM: PPC: Add cache flush on page map
  KVM: PPC: Book3S HV: Fix incorrect branch in H_CEDE code
  KVM: x86: update KVM_SAVE_MSRS_BEGIN to correct value
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h       1
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h       12
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c     3
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c     2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  12
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c              11
-rw-r--r--  arch/powerpc/mm/mem.c                     1
-rw-r--r--  arch/x86/kvm/emulate.c                   30
-rw-r--r--  arch/x86/kvm/mmu.c                       13
-rw-r--r--  arch/x86/kvm/x86.c                        2
10 files changed, 64 insertions, 23 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 50ea12fd7bf..a8bf5c673a3 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/processor.h>
 #include <asm/page.h>
+#include <asm/cacheflush.h>
 
 #define KVM_MAX_VCPUS	NR_CPUS
 #define KVM_MAX_VCORES	NR_CPUS
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0124937a23b..e006f0bdea9 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid);
 void kvmppc_free_lpid(long lpid);
 void kvmppc_init_lpid(unsigned long nr_lpids);
 
+static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
+{
+	/* Clear i-cache for new pages */
+	struct page *page;
+	page = pfn_to_page(pfn);
+	if (!test_bit(PG_arch_1, &page->flags)) {
+		flush_dcache_icache_page(page);
+		set_bit(PG_arch_1, &page->flags);
+	}
+}
+
+
 #endif /* __POWERPC_KVM_PPC_H__ */
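
For readers outside PPC land: the PowerPC instruction cache does not snoop the data cache, so a page the host has just written (for example, guest code paged in from disk) must have its d-cache flushed and i-cache invalidated before being mapped executable. The PG_arch_1 page flag makes this lazy, flushing each page at most once. A minimal userspace sketch of that guard, with hypothetical stand-ins for struct page and the flush primitive:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct page; icache_clean models PG_arch_1. */
struct fake_page {
	bool icache_clean;
};

static void flush_dcache_icache(struct fake_page *page)
{
	/* Stand-in for flush_dcache_icache_page(): write back the d-cache
	 * and invalidate the i-cache for this page. */
	printf("flushing page %p\n", (void *)page);
}

/* Flush at most once per page: the same guard kvmppc_mmu_flush_icache()
 * implements with test_bit()/set_bit() on page->flags. */
static void map_executable(struct fake_page *page)
{
	if (!page->icache_clean) {
		flush_dcache_icache(page);
		page->icache_clean = true;
	}
}

int main(void)
{
	struct fake_page p = { .icache_clean = false };

	map_executable(&p);	/* first executable mapping: flushes */
	map_executable(&p);	/* later mappings: skip, already clean */
	return 0;
}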
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index f922c29bb23..837f13e7b6b 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -211,6 +211,9 @@ next_pteg:
 		pteg1 |= PP_RWRX;
 	}
 
+	if (orig_pte->may_execute)
+		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+
 	local_irq_disable();
 
 	if (pteg[rr]) {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 10fc8ec9d2a..0688b6b3958 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 
 	if (!orig_pte->may_execute)
 		rflags |= HPTE_R_N;
+	else
+		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
 
 	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5a84c8d3d04..44b72feaff7 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede)
 	sync			/* order setting ceded vs. testing prodded */
 	lbz	r5,VCPU_PRODDED(r3)
 	cmpwi	r5,0
-	bne	1f
+	bne	kvm_cede_prodded
 	li	r0,0		/* set trap to 0 to say hcall is handled */
 	stw	r0,VCPU_TRAP(r3)
 	li	r0,H_SUCCESS
 	std	r0,VCPU_GPR(R3)(r3)
 BEGIN_FTR_SECTION
-	b	2f		/* just send it up to host on 970 */
+	b	kvm_cede_exit	/* just send it up to host on 970 */
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 
 	/*
@@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	or	r4,r4,r0
 	PPC_POPCNTW(R7,R4)
 	cmpw	r7,r8
-	bge	2f
+	bge	kvm_cede_exit
 	stwcx.	r4,0,r6
 	bne	31b
 	li	r0,1
@@ -1555,7 +1555,8 @@ kvm_end_cede:
 	b	hcall_real_fallback
 
 	/* cede when already previously prodded case */
-1:	li	r0,0
+kvm_cede_prodded:
+	li	r0,0
 	stb	r0,VCPU_PRODDED(r3)
 	sync			/* order testing prodded vs. clearing ceded */
 	stb	r0,VCPU_CEDED(r3)
@@ -1563,7 +1564,8 @@ kvm_end_cede:
 	blr
 
 	/* we've ceded but we want to give control to the host */
-2:	li	r3,H_TOO_HARD
+kvm_cede_exit:
+	li	r3,H_TOO_HARD
 	blr
 
 secondary_too_late:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c510fc96130..a2b66717813 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	if (vcpu_e500->g2h_tlb1_map)
-		memset(vcpu_e500->g2h_tlb1_map,
-		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0);
+		memset(vcpu_e500->g2h_tlb1_map, 0,
+		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
 	if (vcpu_e500->h2g_tlb1_rmap)
-		memset(vcpu_e500->h2g_tlb1_rmap,
-		       sizeof(unsigned int) * host_tlb_params[1].entries, 0);
+		memset(vcpu_e500->h2g_tlb1_rmap, 0,
+		       sizeof(unsigned int) * host_tlb_params[1].entries);
 }
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
 				ref, gvaddr, stlbe);
+
+	/* Clear i-cache for new pages */
+	kvmppc_mmu_flush_icache(pfn);
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
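
The memset fix above is purely an argument-order bug: the C signature is memset(void *s, int c, size_t n), fill value before length, so the old calls passed the byte count as the fill value and 0 as the length, and therefore cleared nothing (hence the subject "e500_tlb memset clears nothing"). A standalone demonstration, with a hypothetical buffer:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[8];

	memset(buf, 0xff, sizeof(buf));

	/* Wrong argument order, as in the old clear_tlb1_bitmap():
	 * fill value sizeof(buf), length 0 -- writes zero bytes. */
	memset(buf, sizeof(buf), 0);
	printf("after broken memset: %#x\n", buf[0]);	/* still 0xff */

	/* Correct order: fill value 0, length sizeof(buf). */
	memset(buf, 0, sizeof(buf));
	printf("after fixed memset:  %#x\n", buf[0]);	/* now 0 */
	return 0;
}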
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index baaafde7d13..fbdad0e3929 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page)
 	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
 }
+EXPORT_SYMBOL(flush_dcache_icache_page);
 
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 97d9a9914ba..a3b57a27be8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 	return address_mask(ctxt, reg);
 }
 
+static void masked_increment(ulong *reg, ulong mask, int inc)
+{
+	assign_masked(reg, *reg + inc, mask);
+}
+
 static inline void
 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
 {
+	ulong mask;
+
 	if (ctxt->ad_bytes == sizeof(unsigned long))
-		*reg += inc;
+		mask = ~0UL;
 	else
-		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+		mask = ad_mask(ctxt);
+	masked_increment(reg, mask, inc);
+}
+
+static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+{
+	masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
 }
 
 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
 {
 	struct segmented_address addr;
 
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
-	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+	rsp_increment(ctxt, -bytes);
+	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 
 	return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	struct segmented_address addr;
 
-	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 	rc = segmented_read(ctxt, addr, dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
+	rsp_increment(ctxt, len);
 	return rc;
 }
 
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
 
 	while (reg >= VCPU_REGS_RAX) {
 		if (reg == VCPU_REGS_RSP) {
-			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
-						   ctxt->op_bytes);
+			rsp_increment(ctxt, ctxt->op_bytes);
 			--reg;
 		}
 
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
 	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+	rsp_increment(ctxt, ctxt->src.val);
 	return X86EMUL_CONTINUE;
 }
 
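
The theme of the emulate.c change: every stack-pointer update now goes through masked_increment(), which modifies only the bits selected by the stack-size attribute, so e.g. a 16-bit stack segment wraps SP without corrupting the upper bits of RSP. A self-contained sketch of the masking arithmetic (assign_masked() here mirrors the emulator helper of the same name; the register value is made up):

#include <stdio.h>

typedef unsigned long ulong;

/* Same blend as the emulator's assign_masked(): keep the bits outside
 * mask, replace the bits inside mask. */
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

int main(void)
{
	/* Pretend RSP carries stale upper bits while the guest runs with
	 * a 16-bit stack (mask 0xffff, as stack_mask() would return). */
	ulong rsp = 0x12340002;

	masked_increment(&rsp, 0xffff, -8);	/* push 8 bytes */
	printf("rsp = %#lx\n", rsp);	/* 0x1234fffa: SP wrapped, upper bits kept */
	return 0;
}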
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01ca0042393..7fbd0d273ea 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4113,16 +4113,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		LIST_HEAD(invalid_list);
 
 		/*
+		 * Never scan more than sc->nr_to_scan VM instances.
+		 * Will not hit this condition practically since we do not try
+		 * to shrink more than one VM and it is very unlikely to see
+		 * !n_used_mmu_pages so many times.
+		 */
+		if (!nr_to_scan--)
+			break;
+		/*
 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
 		 * here. We may skip a VM instance errorneosly, but we do not
 		 * want to shrink a VM that only started to populate its MMU
 		 * anyway.
 		 */
-		if (kvm->arch.n_used_mmu_pages > 0) {
-			if (!nr_to_scan--)
-				break;
+		if (!kvm->arch.n_used_mmu_pages)
 			continue;
-		}
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
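
The control-flow inversion is easy to miss in the diff: the old loop hit continue precisely when a VM had used mmu pages, so the freeing code further down was only ever reached for VMs with nothing to free. The new shape spends the scan budget first and skips only empty VMs. A condensed sketch of the corrected loop (the VM list and page counts are invented for illustration):

#include <stdio.h>

int main(void)
{
	/* Hypothetical per-VM counts of used mmu pages. */
	int n_used_mmu_pages[] = { 12, 0, 7 };
	int nr_vms = 3;
	int nr_to_scan = 8;	/* shrinker budget, like sc->nr_to_scan */

	for (int i = 0; i < nr_vms; i++) {
		/* New logic: stop once the scan budget is exhausted... */
		if (!nr_to_scan--)
			break;
		/* ...and skip only VMs with no mmu pages at all. The old
		 * code instead skipped every VM whose count was nonzero,
		 * so it never freed anything. */
		if (!n_used_mmu_pages[i])
			continue;

		printf("would free pages from VM %d (%d used)\n",
		       i, n_used_mmu_pages[i]);
	}
	return 0;
}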
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48f692..dce75b76031 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	9
+#define KVM_SAVE_MSRS_BEGIN	10
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
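
KVM_SAVE_MSRS_BEGIN counts the kvm-specific (paravirtual) MSRs at the head of msrs_to_save; as far as I can tell, entries from that offset onward are probed as real hardware MSRs and dropped when the host cannot read them, so a stale count can cause the last paravirtual MSR to be probed and silently filtered out of the save list. A generic sketch of that head/tail split failure mode (the table contents are illustrative, not the kernel's MSR list):

#include <stdio.h>

/* Number of special entries at the head of the table, in the spirit of
 * KVM_SAVE_MSRS_BEGIN. The correct value for this table is 3; the stale
 * value 2 reproduces the class of bug fixed above. */
#define SPECIAL_BEGIN 2

static const char *table[] = {
	"pv-msr-a", "pv-msr-b", "pv-msr-c",	/* head: always kept */
	"hw-msr-x", "hw-msr-y",			/* tail: probed, may be dropped */
};

int main(void)
{
	/* Entries from SPECIAL_BEGIN onward get the "probe and maybe drop"
	 * treatment; with the stale count, pv-msr-c is misclassified. */
	for (unsigned int i = SPECIAL_BEGIN;
	     i < sizeof(table) / sizeof(table[0]); i++)
		printf("probed as hardware MSR: %s\n", table[i]);
	return 0;
}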