about summary refs log tree commit diff stats
path: root/arch/powerpc/kvm/book3s_hv.c
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2011-06-28 20:40:08 -0400
committerAvi Kivity <avi@redhat.com>2011-07-12 06:16:59 -0400
commit9e368f2915601cd5bc7f5fd638b58435b018bbd7 (patch)
tree104aa8204f17d2d43e4746f614510e256896cb7e /arch/powerpc/kvm/book3s_hv.c
parent969391c58a4efb8411d6881179945f425ad9cbb5 (diff)
KVM: PPC: book3s_hv: Add support for PPC970-family processors
This adds support for running KVM guests in supervisor mode on those PPC970 processors that have a usable hypervisor mode. Unfortunately, Apple G5 machines have supervisor mode disabled (MSR[HV] is forced to 1), but the YDL PowerStation does have a usable hypervisor mode. There are several differences between the PPC970 and POWER7 in how guests are managed. These differences are accommodated using the CPU_FTR_ARCH_201 (PPC970) and CPU_FTR_ARCH_206 (POWER7) CPU feature bits. Notably, on PPC970: * The LPCR, LPID or RMOR registers don't exist, and the functions of those registers are provided by bits in HID4 and one bit in HID0. * External interrupts can be directed to the hypervisor, but unlike POWER7 they are masked by MSR[EE] in non-hypervisor modes and use SRR0/1 not HSRR0/1. * There is no virtual RMA (VRMA) mode; the guest must use an RMO (real mode offset) area. * The TLB entries are not tagged with the LPID, so it is necessary to flush the whole TLB on partition switch. Furthermore, when switching partitions we have to ensure that no other CPU is executing the tlbie or tlbsync instructions in either the old or the new partition, otherwise undefined behaviour can occur. * The PMU has 8 counters (PMC registers) rather than 6. * The DSCR, PURR, SPURR, AMR, AMOR, UAMOR registers don't exist. * The SLB has 64 entries rather than 32. * There is no mediated external interrupt facility, so if we switch to a guest that has a virtual external interrupt pending but the guest has MSR[EE] = 0, we have to arrange to have an interrupt pending for it so that we can get control back once it re-enables interrupts. We do that by sending ourselves an IPI with smp_send_reschedule after hard-disabling interrupts. Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--arch/powerpc/kvm/book3s_hv.c60
1 file changed, 49 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index dc70e7745ab3..cc0d7f1b19ab 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -443,8 +443,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
443 443
444int kvmppc_core_check_processor_compat(void) 444int kvmppc_core_check_processor_compat(void)
445{ 445{
446 if (cpu_has_feature(CPU_FTR_HVMODE) && 446 if (cpu_has_feature(CPU_FTR_HVMODE))
447 cpu_has_feature(CPU_FTR_ARCH_206))
448 return 0; 447 return 0;
449 return -EIO; 448 return -EIO;
450} 449}
@@ -731,6 +730,10 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
731 return -EINTR; 730 return -EINTR;
732 } 731 }
733 732
733 /* On PPC970, check that we have an RMA region */
734 if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
735 return -EPERM;
736
734 kvm_run->exit_reason = 0; 737 kvm_run->exit_reason = 0;
735 vcpu->arch.ret = RESUME_GUEST; 738 vcpu->arch.ret = RESUME_GUEST;
736 vcpu->arch.trap = 0; 739 vcpu->arch.trap = 0;
@@ -920,12 +923,14 @@ fail:
920} 923}
921 924
922/* Work out RMLS (real mode limit selector) field value for a given RMA size. 925/* Work out RMLS (real mode limit selector) field value for a given RMA size.
923 Assumes POWER7. */ 926 Assumes POWER7 or PPC970. */
924static inline int lpcr_rmls(unsigned long rma_size) 927static inline int lpcr_rmls(unsigned long rma_size)
925{ 928{
926 switch (rma_size) { 929 switch (rma_size) {
927 case 32ul << 20: /* 32 MB */ 930 case 32ul << 20: /* 32 MB */
928 return 8; 931 if (cpu_has_feature(CPU_FTR_ARCH_206))
932 return 8; /* only supported on POWER7 */
933 return -1;
929 case 64ul << 20: /* 64 MB */ 934 case 64ul << 20: /* 64 MB */
930 return 3; 935 return 3;
931 case 128ul << 20: /* 128 MB */ 936 case 128ul << 20: /* 128 MB */
@@ -1059,6 +1064,10 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1059 mem->userspace_addr == vma->vm_start) 1064 mem->userspace_addr == vma->vm_start)
1060 ri = vma->vm_file->private_data; 1065 ri = vma->vm_file->private_data;
1061 up_read(&current->mm->mmap_sem); 1066 up_read(&current->mm->mmap_sem);
1067 if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) {
1068 pr_err("CPU requires an RMO\n");
1069 return -EINVAL;
1070 }
1062 } 1071 }
1063 1072
1064 if (ri) { 1073 if (ri) {
@@ -1077,10 +1086,25 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1077 atomic_inc(&ri->use_count); 1086 atomic_inc(&ri->use_count);
1078 kvm->arch.rma = ri; 1087 kvm->arch.rma = ri;
1079 kvm->arch.n_rma_pages = rma_size >> porder; 1088 kvm->arch.n_rma_pages = rma_size >> porder;
1080 lpcr = kvm->arch.lpcr & ~(LPCR_VPM0 | LPCR_VRMA_L); 1089
1081 lpcr |= rmls << LPCR_RMLS_SH; 1090 /* Update LPCR and RMOR */
1091 lpcr = kvm->arch.lpcr;
1092 if (cpu_has_feature(CPU_FTR_ARCH_201)) {
1093 /* PPC970; insert RMLS value (split field) in HID4 */
1094 lpcr &= ~((1ul << HID4_RMLS0_SH) |
1095 (3ul << HID4_RMLS2_SH));
1096 lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
1097 ((rmls & 3) << HID4_RMLS2_SH);
1098 /* RMOR is also in HID4 */
1099 lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
1100 << HID4_RMOR_SH;
1101 } else {
1102 /* POWER7 */
1103 lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
1104 lpcr |= rmls << LPCR_RMLS_SH;
1105 kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
1106 }
1082 kvm->arch.lpcr = lpcr; 1107 kvm->arch.lpcr = lpcr;
1083 kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
1084 pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n", 1108 pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
1085 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); 1109 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
1086 } 1110 }
@@ -1151,11 +1175,25 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1151 kvm->arch.rma = NULL; 1175 kvm->arch.rma = NULL;
1152 kvm->arch.n_rma_pages = 0; 1176 kvm->arch.n_rma_pages = 0;
1153 1177
1154 lpcr = kvm->arch.host_lpcr & (LPCR_PECE | LPCR_LPES); 1178 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
1155 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
1156 LPCR_VPM0 | LPCR_VRMA_L;
1157 kvm->arch.lpcr = lpcr;
1158 1179
1180 if (cpu_has_feature(CPU_FTR_ARCH_201)) {
1181 /* PPC970; HID4 is effectively the LPCR */
1182 unsigned long lpid = kvm->arch.lpid;
1183 kvm->arch.host_lpid = 0;
1184 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
1185 lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
1186 lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
1187 ((lpid & 0xf) << HID4_LPID5_SH);
1188 } else {
1189 /* POWER7; init LPCR for virtual RMA mode */
1190 kvm->arch.host_lpid = mfspr(SPRN_LPID);
1191 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
1192 lpcr &= LPCR_PECE | LPCR_LPES;
1193 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
1194 LPCR_VPM0 | LPCR_VRMA_L;
1195 }
1196 kvm->arch.lpcr = lpcr;
1159 1197
1160 return 0; 1198 return 0;
1161 1199