author     Paolo Bonzini <pbonzini@redhat.com>  2014-08-05 03:55:22 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2014-08-05 03:58:11 -0400
commit     cc568ead3ce8e0284e7e2cc77bd1dafb03ba4ca1 (patch)
tree       6525ab90e70f0e0736e9bc050f66645ca373c802 /arch/powerpc/kvm/book3s_hv.c
parent     5d5768660539b6d0da0d46113ffb0676540579a6 (diff)
parent     8e6afa36e754be84b468d7df9e5aa71cf4003f3b (diff)
Merge tag 'signed-kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm
Patch queue for ppc - 2014-08-01

Highlights in this release include:

 - BookE: Rework instruction fetch, not racy anymore now
 - BookE HV: Fix ONE_REG accessors for some in-hardware registers
 - Book3S: Good number of LE host fixes, enable HV on LE
 - Book3S: Some misc bug fixes
 - Book3S HV: Add in-guest debug support
 - Book3S HV: Preload cache lines on context switch
 - Remove 440 support

Alexander Graf (31):
      KVM: PPC: Book3s PR: Disable AIL mode with OPAL
      KVM: PPC: Book3s HV: Fix tlbie compile error
      KVM: PPC: Book3S PR: Handle hyp doorbell exits
      KVM: PPC: Book3S PR: Fix ABIv2 on LE
      KVM: PPC: Book3S PR: Fix sparse endian checks
      PPC: Add asm helpers for BE 32bit load/store
      KVM: PPC: Book3S HV: Make HTAB code LE host aware
      KVM: PPC: Book3S HV: Access guest VPA in BE
      KVM: PPC: Book3S HV: Access host lppaca and shadow slb in BE
      KVM: PPC: Book3S HV: Access XICS in BE
      KVM: PPC: Book3S HV: Fix ABIv2 on LE
      KVM: PPC: Book3S HV: Enable for little endian hosts
      KVM: PPC: Book3S: Move vcore definition to end of kvm_arch struct
      KVM: PPC: Deflect page write faults properly in kvmppc_st
      KVM: PPC: Book3S: Stop PTE lookup on write errors
      KVM: PPC: Book3S: Add hack for split real mode
      KVM: PPC: Book3S: Make magic page properly 4k mappable
      KVM: PPC: Remove 440 support
      KVM: Rename and add argument to check_extension
      KVM: Allow KVM_CHECK_EXTENSION on the vm fd
      KVM: PPC: Book3S: Provide different CAPs based on HV or PR mode
      KVM: PPC: Implement kvmppc_xlate for all targets
      KVM: PPC: Move kvmppc_ld/st to common code
      KVM: PPC: Remove kvmppc_bad_hva()
      KVM: PPC: Use kvm_read_guest in kvmppc_ld
      KVM: PPC: Handle magic page in kvmppc_ld/st
      KVM: PPC: Separate loadstore emulation from priv emulation
      KVM: PPC: Expose helper functions for data/inst faults
      KVM: PPC: Remove DCR handling
      KVM: PPC: HV: Remove generic instruction emulation
      KVM: PPC: PR: Handle FSCR feature deselects

Alexey Kardashevskiy (1):
      KVM: PPC: Book3S: Fix LPCR one_reg interface

Aneesh Kumar K.V (4):
      KVM: PPC: BOOK3S: PR: Fix PURR and SPURR emulation
      KVM: PPC: BOOK3S: PR: Emulate virtual timebase register
      KVM: PPC: BOOK3S: PR: Emulate instruction counter
      KVM: PPC: BOOK3S: HV: Update compute_tlbie_rb to handle 16MB base page

Anton Blanchard (2):
      KVM: PPC: Book3S HV: Fix ABIv2 indirect branch issue
      KVM: PPC: Assembly functions exported to modules need _GLOBAL_TOC()

Bharat Bhushan (10):
      kvm: ppc: bookehv: Added wrapper macros for shadow registers
      kvm: ppc: booke: Use the shared struct helpers of SRR0 and SRR1
      kvm: ppc: booke: Use the shared struct helpers of SPRN_DEAR
      kvm: ppc: booke: Add shared struct helpers of SPRN_ESR
      kvm: ppc: booke: Use the shared struct helpers for SPRN_SPRG0-7
      kvm: ppc: Add SPRN_EPR get helper function
      kvm: ppc: bookehv: Save restore SPRN_SPRG9 on guest entry exit
      KVM: PPC: Booke-hv: Add one reg interface for SPRG9
      KVM: PPC: Remove comment saying SPRG1 is used for vcpu pointer
      KVM: PPC: BOOKEHV: rename e500hv_spr to bookehv_spr

Michael Neuling (1):
      KVM: PPC: Book3S HV: Add H_SET_MODE hcall handling

Mihai Caraman (8):
      KVM: PPC: e500mc: Enhance tlb invalidation condition on vcpu schedule
      KVM: PPC: e500: Fix default tlb for victim hint
      KVM: PPC: e500: Emulate power management control SPR
      KVM: PPC: e500mc: Revert "add load inst fixup"
      KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1
      KVM: PPC: Book3s: Remove kvmppc_read_inst() function
      KVM: PPC: Allow kvmppc_get_last_inst() to fail
      KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

Paul Mackerras (4):
      KVM: PPC: Book3S: Controls for in-kernel sPAPR hypercall handling
      KVM: PPC: Book3S: Allow only implemented hcalls to be enabled or disabled
      KVM: PPC: Book3S PR: Take SRCU read lock around RTAS kvm_read_guest() call
      KVM: PPC: Book3S: Make kvmppc_ld return a more accurate error indication

Stewart Smith (2):
      Split out struct kvmppc_vcore creation to separate function
      Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8

Conflicts:
	Documentation/virtual/kvm/api.txt
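Two of the highlights are userspace-visible API changes: in-kernel handling of individual sPAPR hcalls can now be enabled or disabled per VM, and KVM_CHECK_EXTENSION is accepted on the vm fd. Below is a minimal userspace sketch of how that might be driven, assuming the KVM_CAP_PPC_ENABLE_HCALL semantics described in this series (args[0] is the hcall number, args[1] is 1 to handle it in the kernel, 0 to make it exit to userspace) and kernel headers new enough to define the capability; the H_SET_MODE opcode value is the PAPR one, included here for self-containment.

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        #define H_SET_MODE 0x31C    /* PAPR opcode, assumed here for illustration */

        static int set_hcall_enabled(int vmfd, unsigned long hcall,
                                     unsigned long enable)
        {
                struct kvm_enable_cap cap;

                memset(&cap, 0, sizeof(cap));
                cap.cap = KVM_CAP_PPC_ENABLE_HCALL;
                cap.args[0] = hcall;   /* which hcall to control */
                cap.args[1] = enable;  /* 1 = in-kernel, 0 = exit to userspace */
                return ioctl(vmfd, KVM_ENABLE_CAP, &cap);
        }

        int main(void)
        {
                int kvmfd = open("/dev/kvm", O_RDWR);
                int vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);

                /* KVM_CHECK_EXTENSION is now also accepted on the vm fd */
                if (vmfd < 0 ||
                    ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_ENABLE_HCALL) <= 0) {
                        fprintf(stderr, "per-VM hcall enabling not supported\n");
                        return 1;
                }
                /* e.g. route H_SET_MODE out to userspace for emulation there */
                return set_hcall_enabled(vmfd, H_SET_MODE, 0) ? 1 : 0;
        }

For compatibility, hcalls that already had in-kernel implementations before this facility existed stay enabled by default (see default_hcall_list in the diff below), so old userspace keeps working unchanged.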
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c  271
1 file changed, 228 insertions(+), 43 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7a12edbb61e7..27cced9c7249 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -35,6 +35,7 @@
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
+#include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -67,6 +68,15 @@
 /* Used as a "null" value for timebase values */
 #define TB_NIL (~(u64)0)
 
+static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
+
+#if defined(CONFIG_PPC_64K_PAGES)
+#define MPP_BUFFER_ORDER 0
+#elif defined(CONFIG_PPC_4K_PAGES)
+#define MPP_BUFFER_ORDER 3
+#endif
+
+
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
@@ -270,7 +280,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
 {
         vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
-        vpa->yield_count = 1;
+        vpa->yield_count = cpu_to_be32(1);
 }
 
 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
@@ -293,8 +303,8 @@ static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
 struct reg_vpa {
         u32 dummy;
         union {
-                u16 hword;
-                u32 word;
+                __be16 hword;
+                __be32 word;
         } length;
 };
 
@@ -333,9 +343,9 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
         if (va == NULL)
                 return H_PARAMETER;
         if (subfunc == H_VPA_REG_VPA)
-                len = ((struct reg_vpa *)va)->length.hword;
+                len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
         else
-                len = ((struct reg_vpa *)va)->length.word;
+                len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
         kvmppc_unpin_guest_page(kvm, va, vpa, false);
 
         /* Check length */
@@ -540,21 +550,63 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
                 return;
         memset(dt, 0, sizeof(struct dtl_entry));
         dt->dispatch_reason = 7;
-        dt->processor_id = vc->pcpu + vcpu->arch.ptid;
-        dt->timebase = now + vc->tb_offset;
-        dt->enqueue_to_dispatch_time = stolen;
-        dt->srr0 = kvmppc_get_pc(vcpu);
-        dt->srr1 = vcpu->arch.shregs.msr;
+        dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
+        dt->timebase = cpu_to_be64(now + vc->tb_offset);
+        dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
+        dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
+        dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
         ++dt;
         if (dt == vcpu->arch.dtl.pinned_end)
                 dt = vcpu->arch.dtl.pinned_addr;
         vcpu->arch.dtl_ptr = dt;
         /* order writing *dt vs. writing vpa->dtl_idx */
         smp_wmb();
-        vpa->dtl_idx = ++vcpu->arch.dtl_index;
+        vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
         vcpu->arch.dtl.dirty = true;
 }
 
+static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
+{
+        if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
+                return true;
+        if ((!vcpu->arch.vcore->arch_compat) &&
+            cpu_has_feature(CPU_FTR_ARCH_207S))
+                return true;
+        return false;
+}
+
+static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
+                             unsigned long resource, unsigned long value1,
+                             unsigned long value2)
+{
+        switch (resource) {
+        case H_SET_MODE_RESOURCE_SET_CIABR:
+                if (!kvmppc_power8_compatible(vcpu))
+                        return H_P2;
+                if (value2)
+                        return H_P4;
+                if (mflags)
+                        return H_UNSUPPORTED_FLAG_START;
+                /* Guests can't breakpoint the hypervisor */
+                if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
+                        return H_P3;
+                vcpu->arch.ciabr = value1;
+                return H_SUCCESS;
+        case H_SET_MODE_RESOURCE_SET_DAWR:
+                if (!kvmppc_power8_compatible(vcpu))
+                        return H_P2;
+                if (mflags)
+                        return H_UNSUPPORTED_FLAG_START;
+                if (value2 & DABRX_HYP)
+                        return H_P4;
+                vcpu->arch.dawr = value1;
+                vcpu->arch.dawrx = value2;
+                return H_SUCCESS;
+        default:
+                return H_TOO_HARD;
+        }
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
         unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -562,6 +614,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
         struct kvm_vcpu *tvcpu;
         int idx, rc;
 
+        if (req <= MAX_HCALL_OPCODE &&
+            !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
+                return RESUME_HOST;
+
         switch (req) {
         case H_ENTER:
                 idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -620,7 +676,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 
                 /* Send the error out to userspace via KVM_RUN */
                 return rc;
-
+        case H_SET_MODE:
+                ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                        kvmppc_get_gpr(vcpu, 5),
+                                        kvmppc_get_gpr(vcpu, 6),
+                                        kvmppc_get_gpr(vcpu, 7));
+                if (ret == H_TOO_HARD)
+                        return RESUME_HOST;
+                break;
         case H_XIRR:
         case H_CPPR:
         case H_EOI:
@@ -639,6 +702,29 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
         return RESUME_GUEST;
 }
 
+static int kvmppc_hcall_impl_hv(unsigned long cmd)
+{
+        switch (cmd) {
+        case H_CEDE:
+        case H_PROD:
+        case H_CONFER:
+        case H_REGISTER_VPA:
+        case H_SET_MODE:
+#ifdef CONFIG_KVM_XICS
+        case H_XIRR:
+        case H_CPPR:
+        case H_EOI:
+        case H_IPI:
+        case H_IPOLL:
+        case H_XIRR_X:
+#endif
+                return 1;
+        }
+
+        /* See if it's in the real-mode table */
+        return kvmppc_hcall_impl_hv_realmode(cmd);
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  struct task_struct *tsk)
 {
@@ -785,7 +871,8 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
         return 0;
 }
 
-static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
+                bool preserve_top32)
 {
         struct kvmppc_vcore *vc = vcpu->arch.vcore;
         u64 mask;
@@ -820,6 +907,10 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
         mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
         if (cpu_has_feature(CPU_FTR_ARCH_207S))
                 mask |= LPCR_AIL;
+
+        /* Broken 32-bit version of LPCR must not clear top bits */
+        if (preserve_top32)
+                mask &= 0xFFFFFFFF;
         vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
         spin_unlock(&vc->lock);
 }
@@ -894,12 +985,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
         case KVM_REG_PPC_CIABR:
                 *val = get_reg_val(id, vcpu->arch.ciabr);
                 break;
-        case KVM_REG_PPC_IC:
-                *val = get_reg_val(id, vcpu->arch.ic);
-                break;
-        case KVM_REG_PPC_VTB:
-                *val = get_reg_val(id, vcpu->arch.vtb);
-                break;
         case KVM_REG_PPC_CSIGR:
                 *val = get_reg_val(id, vcpu->arch.csigr);
                 break;
@@ -939,6 +1024,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
                 break;
         case KVM_REG_PPC_LPCR:
+        case KVM_REG_PPC_LPCR_64:
                 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
                 break;
         case KVM_REG_PPC_PPR:
@@ -1094,12 +1180,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
                         vcpu->arch.ciabr &= ~CIABR_PRIV;        /* disable */
                 break;
-        case KVM_REG_PPC_IC:
-                vcpu->arch.ic = set_reg_val(id, *val);
-                break;
-        case KVM_REG_PPC_VTB:
-                vcpu->arch.vtb = set_reg_val(id, *val);
-                break;
         case KVM_REG_PPC_CSIGR:
                 vcpu->arch.csigr = set_reg_val(id, *val);
                 break;
@@ -1150,7 +1230,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                         ALIGN(set_reg_val(id, *val), 1UL << 24);
                 break;
         case KVM_REG_PPC_LPCR:
-                kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
+                kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
+                break;
+        case KVM_REG_PPC_LPCR_64:
+                kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
                 break;
         case KVM_REG_PPC_PPR:
                 vcpu->arch.ppr = set_reg_val(id, *val);
@@ -1228,6 +1311,33 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
         return r;
 }
 
+static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
+{
+        struct kvmppc_vcore *vcore;
+
+        vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
+
+        if (vcore == NULL)
+                return NULL;
+
+        INIT_LIST_HEAD(&vcore->runnable_threads);
+        spin_lock_init(&vcore->lock);
+        init_waitqueue_head(&vcore->wq);
+        vcore->preempt_tb = TB_NIL;
+        vcore->lpcr = kvm->arch.lpcr;
+        vcore->first_vcpuid = core * threads_per_subcore;
+        vcore->kvm = kvm;
+
+        vcore->mpp_buffer_is_valid = false;
+
+        if (cpu_has_feature(CPU_FTR_ARCH_207S))
+                vcore->mpp_buffer = (void *)__get_free_pages(
+                                                GFP_KERNEL|__GFP_ZERO,
+                                                MPP_BUFFER_ORDER);
+
+        return vcore;
+}
+
 static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
                                                    unsigned int id)
 {
@@ -1279,16 +1389,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
         mutex_lock(&kvm->lock);
         vcore = kvm->arch.vcores[core];
         if (!vcore) {
-                vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
-                if (vcore) {
-                        INIT_LIST_HEAD(&vcore->runnable_threads);
-                        spin_lock_init(&vcore->lock);
-                        init_waitqueue_head(&vcore->wq);
-                        vcore->preempt_tb = TB_NIL;
-                        vcore->lpcr = kvm->arch.lpcr;
-                        vcore->first_vcpuid = core * threads_per_subcore;
-                        vcore->kvm = kvm;
-                }
+                vcore = kvmppc_vcore_create(kvm, core);
                 kvm->arch.vcores[core] = vcore;
                 kvm->arch.online_vcores++;
         }
@@ -1500,6 +1601,33 @@ static int on_primary_thread(void)
         return 1;
 }
 
+static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
+{
+        phys_addr_t phy_addr, mpp_addr;
+
+        phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
+        mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
+
+        mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
+        logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
+
+        vc->mpp_buffer_is_valid = true;
+}
+
+static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
+{
+        phys_addr_t phy_addr, mpp_addr;
+
+        phy_addr = virt_to_phys(vc->mpp_buffer);
+        mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
+
+        /* We must abort any in-progress save operations to ensure
+         * the table is valid so that prefetch engine knows when to
+         * stop prefetching. */
+        logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
+        mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
+}
+
 /*
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
@@ -1577,9 +1705,16 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 
         srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
+        if (vc->mpp_buffer_is_valid)
+                kvmppc_start_restoring_l2_cache(vc);
+
         __kvmppc_vcore_entry();
 
         spin_lock(&vc->lock);
+
+        if (vc->mpp_buffer)
+                kvmppc_start_saving_l2_cache(vc);
+
         /* disable sending of IPIs on virtual external irqs */
         list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
                 vcpu->cpu = -1;
@@ -1929,12 +2064,6 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
         (*sps)->page_shift = def->shift;
         (*sps)->slb_enc = def->sllp;
         (*sps)->enc[0].page_shift = def->shift;
-        /*
-         * Only return base page encoding. We don't want to return
-         * all the supporting pte_enc, because our H_ENTER doesn't
-         * support MPSS yet. Once they do, we can start passing all
-         * support pte_enc here
-         */
         (*sps)->enc[0].pte_enc = def->penc[linux_psize];
         /*
          * Add 16MB MPSS support if host supports it
@@ -2281,6 +2410,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
          */
         cpumask_setall(&kvm->arch.need_tlb_flush);
 
+        /* Start out with the default set of hcalls enabled */
+        memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
+               sizeof(kvm->arch.enabled_hcalls));
+
         kvm->arch.rma = NULL;
 
         kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -2323,8 +2456,14 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 {
         long int i;
 
-        for (i = 0; i < KVM_MAX_VCORES; ++i)
+        for (i = 0; i < KVM_MAX_VCORES; ++i) {
+                if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
+                        struct kvmppc_vcore *vc = kvm->arch.vcores[i];
+                        free_pages((unsigned long)vc->mpp_buffer,
+                                   MPP_BUFFER_ORDER);
+                }
                 kfree(kvm->arch.vcores[i]);
+        }
         kvm->arch.online_vcores = 0;
 }
 
@@ -2419,6 +2558,49 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
         return r;
 }
 
+/*
+ * List of hcall numbers to enable by default.
+ * For compatibility with old userspace, we enable by default
+ * all hcalls that were implemented before the hcall-enabling
+ * facility was added. Note this list should not include H_RTAS.
+ */
+static unsigned int default_hcall_list[] = {
+        H_REMOVE,
+        H_ENTER,
+        H_READ,
+        H_PROTECT,
+        H_BULK_REMOVE,
+        H_GET_TCE,
+        H_PUT_TCE,
+        H_SET_DABR,
+        H_SET_XDABR,
+        H_CEDE,
+        H_PROD,
+        H_CONFER,
+        H_REGISTER_VPA,
+#ifdef CONFIG_KVM_XICS
+        H_EOI,
+        H_CPPR,
+        H_IPI,
+        H_IPOLL,
+        H_XIRR,
+        H_XIRR_X,
+#endif
+        0
+};
+
+static void init_default_hcalls(void)
+{
+        int i;
+        unsigned int hcall;
+
+        for (i = 0; default_hcall_list[i]; ++i) {
+                hcall = default_hcall_list[i];
+                WARN_ON(!kvmppc_hcall_impl_hv(hcall));
+                __set_bit(hcall / 4, default_enabled_hcalls);
+        }
+}
+
 static struct kvmppc_ops kvm_ops_hv = {
         .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
         .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -2451,6 +2633,7 @@ static struct kvmppc_ops kvm_ops_hv = {
         .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
         .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
         .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
+        .hcall_implemented = kvmppc_hcall_impl_hv,
 };
 
 static int kvmppc_book3s_init_hv(void)
@@ -2466,6 +2649,8 @@ static int kvmppc_book3s_init_hv(void)
         kvm_ops_hv.owner = THIS_MODULE;
         kvmppc_hv_ops = &kvm_ops_hv;
 
+        init_default_hcalls();
+
         r = kvmppc_mmu_hv_init();
         return r;
 }
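
A note on the bookkeeping above: sPAPR hcall opcodes are multiples of 4, which is why both the per-VM bitmap and default_enabled_hcalls dedicate one bit per opcode at index hcall / 4 and are sized MAX_HCALL_OPCODE/4 + 1 bits. A self-contained userspace sketch of the same indexing scheme; the opcode ceiling and the H_SET_MODE value used here are illustrative assumptions, not taken from kernel headers:

        #include <limits.h>
        #include <stdbool.h>
        #include <stdio.h>

        #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
        #define MAX_HCALL_OPCODE 0x31C  /* illustrative ceiling */
        #define NBITS (MAX_HCALL_OPCODE / 4 + 1)

        /* one bit per opcode, since PAPR hcall numbers are multiples of 4 */
        static unsigned long enabled_hcalls[(NBITS + BITS_PER_LONG - 1) /
                                            BITS_PER_LONG];

        static void hcall_set_enabled(unsigned int hcall, bool enable)
        {
                unsigned int bit = hcall / 4;
                unsigned long mask = 1UL << (bit % BITS_PER_LONG);

                if (enable)
                        enabled_hcalls[bit / BITS_PER_LONG] |= mask;
                else
                        enabled_hcalls[bit / BITS_PER_LONG] &= ~mask;
        }

        static bool hcall_enabled(unsigned int hcall)
        {
                unsigned int bit = hcall / 4;

                return (enabled_hcalls[bit / BITS_PER_LONG] >>
                        (bit % BITS_PER_LONG)) & 1;
        }

        int main(void)
        {
                hcall_set_enabled(0x31C, true);  /* H_SET_MODE, assumed value */
                printf("enabled: %d\n", (int)hcall_enabled(0x31C));
                return 0;
        }

This mirrors what the kernel gets from DECLARE_BITMAP plus test_bit/__set_bit: dividing by 4 before indexing keeps the bitmap a quarter of the size a naive one-bit-per-number table would need.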