author    Paul Mackerras <paulus@samba.org>    2011-06-28 20:40:08 -0400
committer Avi Kivity <avi@redhat.com>          2011-07-12 06:16:59 -0400
commit    9e368f2915601cd5bc7f5fd638b58435b018bbd7 (patch)
tree      104aa8204f17d2d43e4746f614510e256896cb7e /arch/powerpc
parent    969391c58a4efb8411d6881179945f425ad9cbb5 (diff)
KVM: PPC: book3s_hv: Add support for PPC970-family processors
This adds support for running KVM guests in supervisor mode on those
PPC970 processors that have a usable hypervisor mode.  Unfortunately,
Apple G5 machines have supervisor mode disabled (MSR[HV] is forced to
1), but the YDL PowerStation does have a usable hypervisor mode.

There are several differences between the PPC970 and POWER7 in how
guests are managed.  These differences are accommodated using the
CPU_FTR_ARCH_201 (PPC970) and CPU_FTR_ARCH_206 (POWER7) CPU feature
bits.  Notably, on PPC970:

* The LPCR, LPID or RMOR registers don't exist, and the functions of
  those registers are provided by bits in HID4 and one bit in HID0.

* External interrupts can be directed to the hypervisor, but unlike
  POWER7 they are masked by MSR[EE] in non-hypervisor modes and use
  SRR0/1 not HSRR0/1.

* There is no virtual RMA (VRMA) mode; the guest must use an RMO
  (real mode offset) area.

* The TLB entries are not tagged with the LPID, so it is necessary to
  flush the whole TLB on partition switch.  Furthermore, when switching
  partitions we have to ensure that no other CPU is executing the tlbie
  or tlbsync instructions in either the old or the new partition,
  otherwise undefined behaviour can occur.

* The PMU has 8 counters (PMC registers) rather than 6.

* The DSCR, PURR, SPURR, AMR, AMOR, UAMOR registers don't exist.

* The SLB has 64 entries rather than 32.

* There is no mediated external interrupt facility, so if we switch to
  a guest that has a virtual external interrupt pending but the guest
  has MSR[EE] = 0, we have to arrange to have an interrupt pending for
  it so that we can get control back once it re-enables interrupts.
  We do that by sending ourselves an IPI with smp_send_reschedule
  after hard-disabling interrupts.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
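[Editor's note] Since PPC970 has no LPCR, LPID or RMOR registers, the patch
below packs the 6-bit LPID and 3-bit RMLS values into split fields of HID4;
the LPID half lives in kvmppc_core_init_vm() and the RMLS half in
kvmppc_core_prepare_memory_region().  The following self-contained C sketch
illustrates that bit packing.  It is not part of the patch:
kvm_hid4_for_guest() is a made-up helper, the HID4_*_SH shift values are
transcribed from arch/powerpc/include/asm/reg.h as used by this series
(worth double-checking there), and a 64-bit unsigned long is assumed, as on
ppc64.

    #include <stdio.h>

    #define HID4_RMLS0_SH	(63 - 58)	/* real mode limit, top bit */
    #define HID4_RMLS2_SH	(63 - 2)	/* real mode limit, bottom 2 bits */
    #define HID4_LPID1_SH	0		/* partition ID, top 2 bits */
    #define HID4_LPID5_SH	(63 - 6)	/* partition ID, bottom 4 bits */

    static unsigned long kvm_hid4_for_guest(unsigned long hid4,
                                            unsigned long lpid,
                                            unsigned long rmls)
    {
        /* clear the old split LPID (2 + 4 bits) and RMLS (1 + 2 bits) fields */
        hid4 &= ~((3ul << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
        hid4 &= ~((1ul << HID4_RMLS0_SH) | (3ul << HID4_RMLS2_SH));

        /* scatter the 6-bit LPID: top 2 bits and bottom 4 bits go separately */
        hid4 |= ((lpid >> 4) << HID4_LPID1_SH) |
                ((lpid & 0xf) << HID4_LPID5_SH);

        /* likewise the 3-bit RMLS value computed by lpcr_rmls() */
        hid4 |= ((rmls >> 2) << HID4_RMLS0_SH) |
                ((rmls & 3) << HID4_RMLS2_SH);
        return hid4;
    }

    int main(void)
    {
        /* LPID 5 with RMLS 3 (a 128 MB RMA, per lpcr_rmls() in the diff) */
        printf("HID4 image: %#lx\n", kvm_hid4_for_guest(0, 5, 3));
        return 0;
    }

The real code of course starts from the host's current HID4 value rather
than zero, as the init_vm hunk below shows.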
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h   |   4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h  |   2
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h        |   2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c          |   1
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S       |   2
-rw-r--r--  arch/powerpc/kvm/Kconfig                   |  13
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c        |  30
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c               |  60
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c       |  11
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S    |  30
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c        |   6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S    | 230
-rw-r--r--  arch/powerpc/kvm/powerpc.c                 |   3
-rw-r--r--  arch/powerpc/mm/hash_native_64.c           |   2
14 files changed, 354 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 69435da8f2ba..8057f4f6980f 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -246,6 +246,10 @@ label##_hv: \
 	KVMTEST(vec);						\
 	_SOFTEN_TEST(EXC_HV)
 
+#define SOFTEN_TEST_HV_201(vec)					\
+	KVMTEST(vec);						\
+	_SOFTEN_TEST(EXC_STD)
+
 #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)	\
 	HMT_MEDIUM;						\
 	SET_SCRATCH0(r13);	/* save r13 */			\
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 9cfd5436782d..ef7b3688c3b6 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -82,7 +82,7 @@ struct kvmppc_host_state {
 	unsigned long xics_phys;
 	u64 dabr;
 	u64 host_mmcr[3];
-	u32 host_pmc[6];
+	u32 host_pmc[8];
 	u64 host_purr;
 	u64 host_spurr;
 	u64 host_dscr;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index f572d9cc31bd..cc22b282d755 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -353,7 +353,7 @@ struct kvm_vcpu_arch {
 	u32 dbsr;
 
 	u64 mmcr[3];
-	u32 pmc[6];
+	u32 pmc[8];
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f4aba938166b..54b935f2f5de 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -128,6 +128,7 @@ int main(void)
 	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
 	/* paca */
 	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
+	DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
 	DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
 	DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
 	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a5345380bef3..41b02c792aa3 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -171,7 +171,7 @@ hardware_interrupt_hv:
 	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
 FTR_SECTION_ELSE
 	_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
-				    EXC_STD, SOFTEN_TEST_PR)
+				    EXC_STD, SOFTEN_TEST_HV_201)
 	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 5d9b78ebbaa6..eeb42e06f2d7 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -67,23 +67,20 @@ config KVM_BOOK3S_64
 	  If unsure, say N.
 
 config KVM_BOOK3S_64_HV
-	bool "KVM support for POWER7 using hypervisor mode in host"
+	bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
 	depends on KVM_BOOK3S_64
 	---help---
 	  Support running unmodified book3s_64 guest kernels in
-	  virtual machines on POWER7 processors that have hypervisor
-	  mode available to the host.
+	  virtual machines on POWER7 and PPC970 processors that have
+	  hypervisor mode available to the host.
 
 	  If you say Y here, KVM will use the hardware virtualization
 	  facilities of POWER7 (and later) processors, meaning that
 	  guest operating systems will run at full hardware speed
 	  using supervisor and user modes.  However, this also means
 	  that KVM is not usable under PowerVM (pHyp), is only usable
-	  on POWER7 (or later) processors, and can only emulate
-	  POWER5+, POWER6 and POWER7 processors.
-
-	  This module provides access to the hardware capabilities through
-	  a character device node named /dev/kvm.
+	  on POWER7 (or later) processors and PPC970-family processors,
+	  and cannot emulate a different processor from the host processor.
 
 	  If unsure, say N.
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 212dcd8fc50b..bc3a2ea94217 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -42,6 +42,8 @@
 #define VRMA_PAGE_ORDER	24
 #define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
 
+/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
+#define MAX_LPID_970	63
 #define NR_LPIDS	(LPID_RSVD + 1)
 unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
 
@@ -69,9 +71,6 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
 
 	kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
 	kvm->arch.lpid = lpid;
-	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
-	kvm->arch.host_lpid = mfspr(SPRN_LPID);
-	kvm->arch.host_lpcr = mfspr(SPRN_LPCR);
 
 	pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
 	return 0;
@@ -128,12 +127,24 @@ void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
 
 int kvmppc_mmu_hv_init(void)
 {
-	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
-	    !cpu_has_feature(CPU_FTR_ARCH_206))
+	unsigned long host_lpid, rsvd_lpid;
+
+	if (!cpu_has_feature(CPU_FTR_HVMODE))
 		return -EINVAL;
+
 	memset(lpid_inuse, 0, sizeof(lpid_inuse));
-	set_bit(mfspr(SPRN_LPID), lpid_inuse);
-	set_bit(LPID_RSVD, lpid_inuse);
+
+	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
+		rsvd_lpid = LPID_RSVD;
+	} else {
+		host_lpid = 0;			/* PPC970 */
+		rsvd_lpid = MAX_LPID_970;
+	}
+
+	set_bit(host_lpid, lpid_inuse);
+	/* rsvd_lpid is reserved for use in partition switching */
+	set_bit(rsvd_lpid, lpid_inuse);
 
 	return 0;
 }
@@ -157,7 +168,10 @@ void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
 
-	vcpu->arch.slb_nr = 32;		/* Assume POWER7 for now */
+	if (cpu_has_feature(CPU_FTR_ARCH_206))
+		vcpu->arch.slb_nr = 32;		/* POWER7 */
+	else
+		vcpu->arch.slb_nr = 64;
 
 	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
 	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index dc70e7745ab3..cc0d7f1b19ab 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -443,8 +443,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 int kvmppc_core_check_processor_compat(void)
 {
-	if (cpu_has_feature(CPU_FTR_HVMODE) &&
-	    cpu_has_feature(CPU_FTR_ARCH_206))
+	if (cpu_has_feature(CPU_FTR_HVMODE))
 		return 0;
 	return -EIO;
 }
@@ -731,6 +730,10 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINTR;
 	}
 
+	/* On PPC970, check that we have an RMA region */
+	if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
+		return -EPERM;
+
 	kvm_run->exit_reason = 0;
 	vcpu->arch.ret = RESUME_GUEST;
 	vcpu->arch.trap = 0;
@@ -920,12 +923,14 @@ fail:
 }
 
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
-   Assumes POWER7. */
+   Assumes POWER7 or PPC970. */
 static inline int lpcr_rmls(unsigned long rma_size)
 {
 	switch (rma_size) {
 	case 32ul << 20:	/* 32 MB */
-		return 8;
+		if (cpu_has_feature(CPU_FTR_ARCH_206))
+			return 8;	/* only supported on POWER7 */
+		return -1;
 	case 64ul << 20:	/* 64 MB */
 		return 3;
 	case 128ul << 20:	/* 128 MB */
@@ -1059,6 +1064,10 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 		    mem->userspace_addr == vma->vm_start)
 			ri = vma->vm_file->private_data;
 		up_read(&current->mm->mmap_sem);
+		if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) {
+			pr_err("CPU requires an RMO\n");
+			return -EINVAL;
+		}
 	}
 
 	if (ri) {
@@ -1077,10 +1086,25 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 		atomic_inc(&ri->use_count);
 		kvm->arch.rma = ri;
 		kvm->arch.n_rma_pages = rma_size >> porder;
-		lpcr = kvm->arch.lpcr & ~(LPCR_VPM0 | LPCR_VRMA_L);
-		lpcr |= rmls << LPCR_RMLS_SH;
+
+		/* Update LPCR and RMOR */
+		lpcr = kvm->arch.lpcr;
+		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+			/* PPC970; insert RMLS value (split field) in HID4 */
+			lpcr &= ~((1ul << HID4_RMLS0_SH) |
+				  (3ul << HID4_RMLS2_SH));
+			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
+				((rmls & 3) << HID4_RMLS2_SH);
+			/* RMOR is also in HID4 */
+			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
+					<< HID4_RMOR_SH;
+		} else {
+			/* POWER7 */
+			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
+			lpcr |= rmls << LPCR_RMLS_SH;
+			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
+		}
 		kvm->arch.lpcr = lpcr;
-		kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
 		pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
 			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
 	}
@@ -1151,11 +1175,25 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	kvm->arch.rma = NULL;
 	kvm->arch.n_rma_pages = 0;
 
-	lpcr = kvm->arch.host_lpcr & (LPCR_PECE | LPCR_LPES);
-	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
-		LPCR_VPM0 | LPCR_VRMA_L;
-	kvm->arch.lpcr = lpcr;
+	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
 
+	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+		/* PPC970; HID4 is effectively the LPCR */
+		unsigned long lpid = kvm->arch.lpid;
+		kvm->arch.host_lpid = 0;
+		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
+		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
+		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
+			((lpid & 0xf) << HID4_LPID5_SH);
+	} else {
+		/* POWER7; init LPCR for virtual RMA mode */
+		kvm->arch.host_lpid = mfspr(SPRN_LPID);
+		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+		lpcr &= LPCR_PECE | LPCR_LPES;
+		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+			LPCR_VPM0 | LPCR_VRMA_L;
+	}
+	kvm->arch.lpcr = lpcr;
 
 	return 0;
 
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7315ec6e8177..d43120355eec 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -55,12 +55,14 @@ static LIST_HEAD(free_rmas);
 static DEFINE_SPINLOCK(rma_lock);
 
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
-   Assumes POWER7. */
+   Assumes POWER7 or PPC970. */
 static inline int lpcr_rmls(unsigned long rma_size)
 {
 	switch (rma_size) {
 	case 32ul << 20:	/* 32 MB */
-		return 8;
+		if (cpu_has_feature(CPU_FTR_ARCH_206))
+			return 8;	/* only supported on POWER7 */
+		return -1;
 	case 64ul << 20:	/* 64 MB */
 		return 3;
 	case 128ul << 20:	/* 128 MB */
@@ -90,8 +92,9 @@ void kvm_rma_init(void)
 	void *rma;
 	struct page *pg;
 
-	/* Only do this in HV mode */
-	if (!cpu_has_feature(CPU_FTR_HVMODE))
+	/* Only do this on PPC970 in HV mode */
+	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+	    !cpu_has_feature(CPU_FTR_ARCH_201))
 		return;
 
 	if (!kvm_rma_size || !kvm_rma_count)
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 532afaf19841..3f7b674dd4bf 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -50,8 +50,10 @@ _GLOBAL(__kvmppc_vcore_entry)
 	SAVE_NVGPRS(r1)
 
 	/* Save host DSCR */
+BEGIN_FTR_SECTION
 	mfspr	r3, SPRN_DSCR
 	std	r3, HSTATE_DSCR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Save host DABR */
 	mfspr	r3, SPRN_DABR
@@ -86,12 +88,20 @@ _GLOBAL(__kvmppc_vcore_entry)
 	mfspr	r7, SPRN_PMC4
 	mfspr	r8, SPRN_PMC5
 	mfspr	r9, SPRN_PMC6
+BEGIN_FTR_SECTION
+	mfspr	r10, SPRN_PMC7
+	mfspr	r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	stw	r3, HSTATE_PMC(r13)
 	stw	r5, HSTATE_PMC + 4(r13)
 	stw	r6, HSTATE_PMC + 8(r13)
 	stw	r7, HSTATE_PMC + 12(r13)
 	stw	r8, HSTATE_PMC + 16(r13)
 	stw	r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+	stw	r10, HSTATE_PMC + 24(r13)
+	stw	r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 31:
 
 	/*
97 /* 107 /*
@@ -105,6 +115,26 @@ _GLOBAL(__kvmppc_vcore_entry)
 	add	r8,r8,r7
 	std	r8,HSTATE_DECEXP(r13)
 
+	/*
+	 * On PPC970, if the guest vcpu has an external interrupt pending,
+	 * send ourselves an IPI so as to interrupt the guest once it
+	 * enables interrupts.  (It must have interrupts disabled,
+	 * otherwise we would already have delivered the interrupt.)
+	 */
+BEGIN_FTR_SECTION
+	ld	r0, VCPU_PENDING_EXC(r4)
+	li	r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
+	oris	r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+	and.	r0, r0, r7
+	beq	32f
+	mr	r31, r4
+	lhz	r3, PACAPACAINDEX(r13)
+	bl	smp_send_reschedule
+	nop
+	mr	r4, r31
+32:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+
 	/* Jump to partition switch code */
 	bl	.kvmppc_hv_entry_trampoline
 	nop
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index edb0aae901a3..fcfe6b055558 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -56,7 +56,8 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	/* only handle 4k, 64k and 16M pages for now */
 	porder = 12;
 	if (pteh & HPTE_V_LARGE) {
-		if ((ptel & 0xf000) == 0x1000) {
+		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		    (ptel & 0xf000) == 0x1000) {
 			/* 64k page */
 			porder = 16;
 		} else if ((ptel & 0xff000) == 0) {
@@ -126,7 +127,8 @@ static unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	va_low &= 0x7ff;
 	if (v & HPTE_V_LARGE) {
 		rb |= 1;			/* L field */
-		if (r & 0xff000) {
+		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		    (r & 0xff000)) {
 			/* non-16MB large page, must be 64k */
 			/* (masks depend on page size) */
 			rb |= 0x1000;		/* page encoding in LP field */
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 9ee223c35285..6dd33581a228 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -148,12 +148,20 @@ kvmppc_hv_entry:
 	lwz	r7, VCPU_PMC + 12(r4)
 	lwz	r8, VCPU_PMC + 16(r4)
 	lwz	r9, VCPU_PMC + 20(r4)
+BEGIN_FTR_SECTION
+	lwz	r10, VCPU_PMC + 24(r4)
+	lwz	r11, VCPU_PMC + 28(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr	SPRN_PMC1, r3
 	mtspr	SPRN_PMC2, r5
 	mtspr	SPRN_PMC3, r6
 	mtspr	SPRN_PMC4, r7
 	mtspr	SPRN_PMC5, r8
 	mtspr	SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+	mtspr	SPRN_PMC7, r10
+	mtspr	SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, VCPU_MMCR(r4)
 	ld	r5, VCPU_MMCR + 8(r4)
 	ld	r6, VCPU_MMCR + 16(r4)
@@ -165,9 +173,11 @@ kvmppc_hv_entry:
 	/* Load up FP, VMX and VSX registers */
 	bl	kvmppc_load_fp
 
+BEGIN_FTR_SECTION
 	/* Switch DSCR to guest value */
 	ld	r5, VCPU_DSCR(r4)
 	mtspr	SPRN_DSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/*
 	 * Set the decrementer to the guest decrementer.
@@ -210,6 +220,7 @@ kvmppc_hv_entry:
 	mtspr	SPRN_DABRX,r5
 	mtspr	SPRN_DABR,r6
 
+BEGIN_FTR_SECTION
 	/* Restore AMR and UAMOR, set AMOR to all 1s */
 	ld	r5,VCPU_AMR(r4)
 	ld	r6,VCPU_UAMOR(r4)
@@ -217,6 +228,7 @@ kvmppc_hv_entry:
 	mtspr	SPRN_AMR,r5
 	mtspr	SPRN_UAMOR,r6
 	mtspr	SPRN_AMOR,r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Clear out SLB */
 	li	r6,0
@@ -224,6 +236,14 @@ kvmppc_hv_entry:
 	slbia
 	ptesync
 
+BEGIN_FTR_SECTION
+	b	30f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	/*
+	 * POWER7 host -> guest partition switch code.
+	 * We don't have to lock against concurrent tlbies,
+	 * but we do have to coordinate across hardware threads.
+	 */
 	/* Increment entry count iff exit count is zero. */
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	addi	r9,r5,VCORE_ENTRY_EXIT
@@ -315,9 +335,94 @@ kvmppc_hv_entry:
 	ld	r8,VCPU_SPURR(r4)
 	mtspr	SPRN_PURR,r7
 	mtspr	SPRN_SPURR,r8
+	b	31f
+
+	/*
+	 * PPC970 host -> guest partition switch code.
+	 * We have to lock against concurrent tlbies,
+	 * using native_tlbie_lock to lock against host tlbies
+	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
+	 * We also have to invalidate the TLB since its
+	 * entries aren't tagged with the LPID.
+	 */
+30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
+
+	/* first take native_tlbie_lock */
+	.section ".toc","aw"
+toc_tlbie_lock:
+	.tc	native_tlbie_lock[TC],native_tlbie_lock
+	.previous
+	ld	r3,toc_tlbie_lock@toc(2)
+	lwz	r8,PACA_LOCK_TOKEN(r13)
+24:	lwarx	r0,0,r3
+	cmpwi	r0,0
+	bne	24b
+	stwcx.	r8,0,r3
+	bne	24b
+	isync
+
+	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
+	li	r0,0x18f
+	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
+	or	r0,r7,r0
+	ptesync
+	sync
+	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
+	isync
+	li	r0,0
+	stw	r0,0(r3)		/* drop native_tlbie_lock */
+
+	/* invalidate the whole TLB */
+	li	r0,256
+	mtctr	r0
+	li	r6,0
+25:	tlbiel	r6
+	addi	r6,r6,0x1000
+	bdnz	25b
+	ptesync
+
+	/* Take the guest's tlbie_lock */
+	addi	r3,r9,KVM_TLBIE_LOCK
+24:	lwarx	r0,0,r3
+	cmpwi	r0,0
+	bne	24b
+	stwcx.	r8,0,r3
+	bne	24b
+	isync
+	ld	r6,KVM_SDR1(r9)
+	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
+
+	/* Set up HID4 with the guest's LPID etc. */
+	sync
+	mtspr	SPRN_HID4,r7
+	isync
+
+	/* drop the guest's tlbie_lock */
+	li	r0,0
+	stw	r0,0(r3)
+
+	/* Check if HDEC expires soon */
+	mfspr	r3,SPRN_HDEC
+	cmpwi	r3,10
+	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+	mr	r9,r4
+	blt	hdec_soon
+
+	/* Enable HDEC interrupts */
+	mfspr	r0,SPRN_HID0
+	li	r3,1
+	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+	sync
+	mtspr	SPRN_HID0,r0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
 
 	/* Load up guest SLB entries */
-	lwz	r5,VCPU_SLB_MAX(r4)
+31:	lwz	r5,VCPU_SLB_MAX(r4)
 	cmpwi	r5,0
 	beq	9f
 	mtctr	r5
@@ -472,6 +577,7 @@ kvmppc_interrupt:
 hcall_real_cont:
 
 	/* Check for mediated interrupts (could be done earlier really ...) */
+BEGIN_FTR_SECTION
 	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
 	bne+	1f
 	ld	r5,VCPU_KVM(r9)
477 ld r5,VCPU_KVM(r9) 583 ld r5,VCPU_KVM(r9)
@@ -481,6 +587,7 @@ hcall_real_cont:
481 andi. r0,r5,LPCR_MER 587 andi. r0,r5,LPCR_MER
482 bne bounce_ext_interrupt 588 bne bounce_ext_interrupt
4831: 5891:
590END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
484 591
485 /* Save DEC */ 592 /* Save DEC */
486 mfspr r5,SPRN_DEC 593 mfspr r5,SPRN_DEC
@@ -492,9 +599,11 @@ hcall_real_cont:
 	/* Save HEIR (HV emulation assist reg) in last_inst
 	   if this is an HEI (HV emulation interrupt, e40) */
 	li	r3,-1
+BEGIN_FTR_SECTION
 	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
 	bne	11f
 	mfspr	r3,SPRN_HEIR
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 11:	stw	r3,VCPU_LAST_INST(r9)
 
 	/* Save more register state */
@@ -508,8 +617,10 @@ hcall_real_cont:
 	stw	r7, VCPU_DSISR(r9)
 	std	r8, VCPU_CTR(r9)
 	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
+BEGIN_FTR_SECTION
 	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
 	beq	6f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 7:	std	r6, VCPU_FAULT_DAR(r9)
 	stw	r7, VCPU_FAULT_DSISR(r9)
 
@@ -543,6 +654,7 @@ hcall_real_cont:
 	/*
 	 * Save the guest PURR/SPURR
 	 */
+BEGIN_FTR_SECTION
 	mfspr	r5,SPRN_PURR
 	mfspr	r6,SPRN_SPURR
 	ld	r7,VCPU_PURR(r9)
@@ -562,6 +674,7 @@ hcall_real_cont:
 	add	r4,r4,r6
 	mtspr	SPRN_PURR,r3
 	mtspr	SPRN_SPURR,r4
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 
 	/* Clear out SLB */
 	li	r5,0
@@ -570,6 +683,14 @@ hcall_real_cont:
 	ptesync
 
 hdec_soon:
+BEGIN_FTR_SECTION
+	b	32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	/*
+	 * POWER7 guest -> host partition switch code.
+	 * We don't have to lock against tlbies but we do
+	 * have to coordinate the hardware threads.
+	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
 	lwsync
@@ -640,9 +761,82 @@ hdec_soon:
 16:	ld	r8,KVM_HOST_LPCR(r4)
 	mtspr	SPRN_LPCR,r8
 	isync
+	b	33f
+
+	/*
+	 * PPC970 guest -> host partition switch code.
+	 * We have to lock against concurrent tlbies, and
+	 * we have to flush the whole TLB.
+	 */
+32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
+
+	/* Take the guest's tlbie_lock */
+	lwz	r8,PACA_LOCK_TOKEN(r13)
+	addi	r3,r4,KVM_TLBIE_LOCK
+24:	lwarx	r0,0,r3
+	cmpwi	r0,0
+	bne	24b
+	stwcx.	r8,0,r3
+	bne	24b
+	isync
+
+	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
+	li	r0,0x18f
+	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
+	or	r0,r7,r0
+	ptesync
+	sync
+	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
+	isync
+	li	r0,0
+	stw	r0,0(r3)		/* drop guest tlbie_lock */
+
+	/* invalidate the whole TLB */
+	li	r0,256
+	mtctr	r0
+	li	r6,0
+25:	tlbiel	r6
+	addi	r6,r6,0x1000
+	bdnz	25b
+	ptesync
+
+	/* take native_tlbie_lock */
+	ld	r3,toc_tlbie_lock@toc(2)
+24:	lwarx	r0,0,r3
+	cmpwi	r0,0
+	bne	24b
+	stwcx.	r8,0,r3
+	bne	24b
+	isync
+
+	ld	r6,KVM_HOST_SDR1(r4)
+	mtspr	SPRN_SDR1,r6		/* switch to host page table */
+
+	/* Set up host HID4 value */
+	sync
+	mtspr	SPRN_HID4,r7
+	isync
+	li	r0,0
+	stw	r0,0(r3)		/* drop native_tlbie_lock */
+
+	lis	r8,0x7fff		/* MAX_INT@h */
+	mtspr	SPRN_HDEC,r8
+
+	/* Disable HDEC interrupts */
+	mfspr	r0,SPRN_HID0
+	li	r3,0
+	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+	sync
+	mtspr	SPRN_HID0,r0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
+	mfspr	r0,SPRN_HID0
 
 	/* load host SLB entries */
-	ld	r8,PACA_SLBSHADOWPTR(r13)
+33:	ld	r8,PACA_SLBSHADOWPTR(r13)
 
 	.rept	SLB_NUM_BOLTED
 	ld	r5,SLBSHADOW_SAVEAREA(r8)
@@ -654,12 +848,14 @@ hdec_soon:
 	.endr
 
 	/* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
 	mfspr	r5,SPRN_AMR
 	mfspr	r6,SPRN_UAMOR
 	std	r5,VCPU_AMR(r9)
 	std	r6,VCPU_UAMOR(r9)
 	li	r6,0
 	mtspr	SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Restore host DABR and DABRX */
 	ld	r5,HSTATE_DABR(r13)
@@ -668,10 +864,12 @@ hdec_soon:
 	mtspr	SPRN_DABRX,r6
 
 	/* Switch DSCR back to host value */
+BEGIN_FTR_SECTION
 	mfspr	r8, SPRN_DSCR
 	ld	r7, HSTATE_DSCR(r13)
 	std	r8, VCPU_DSCR(r7)
 	mtspr	SPRN_DSCR, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Save non-volatile GPRs */
 	std	r14, VCPU_GPR(r14)(r9)
@@ -735,21 +933,31 @@ hdec_soon:
 	mfspr	r6, SPRN_PMC4
 	mfspr	r7, SPRN_PMC5
 	mfspr	r8, SPRN_PMC6
+BEGIN_FTR_SECTION
+	mfspr	r10, SPRN_PMC7
+	mfspr	r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	stw	r3, VCPU_PMC(r9)
 	stw	r4, VCPU_PMC + 4(r9)
 	stw	r5, VCPU_PMC + 8(r9)
 	stw	r6, VCPU_PMC + 12(r9)
 	stw	r7, VCPU_PMC + 16(r9)
 	stw	r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+	stw	r10, VCPU_PMC + 24(r9)
+	stw	r11, VCPU_PMC + 28(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 22:
 	/* save FP state */
 	mr	r3, r9
 	bl	.kvmppc_save_fp
 
-	/* Secondary threads go off to take a nap */
+	/* Secondary threads go off to take a nap on POWER7 */
+BEGIN_FTR_SECTION
 	lwz	r0,VCPU_PTID(r3)
 	cmpwi	r0,0
 	bne	secondary_nap
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/*
 	 * Reload DEC.  HDEC interrupts were disabled when
@@ -771,12 +979,20 @@ hdec_soon:
 	lwz	r6, HSTATE_PMC + 12(r13)
 	lwz	r8, HSTATE_PMC + 16(r13)
 	lwz	r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+	lwz	r10, HSTATE_PMC + 24(r13)
+	lwz	r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr	SPRN_PMC1, r3
 	mtspr	SPRN_PMC2, r4
 	mtspr	SPRN_PMC3, r5
 	mtspr	SPRN_PMC4, r6
 	mtspr	SPRN_PMC5, r8
 	mtspr	SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+	mtspr	SPRN_PMC7, r10
+	mtspr	SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, HSTATE_MMCR(r13)
 	ld	r4, HSTATE_MMCR + 8(r13)
 	ld	r5, HSTATE_MMCR + 16(r13)
@@ -802,7 +1018,7 @@ hdec_soon:
 	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
 
 	/* RFI into the highmem handler, or branch to interrupt handler */
-	mfmsr	r6
+12:	mfmsr	r6
 	mtctr	r12
 	li	r0, MSR_RI
 	andc	r6, r6, r0
@@ -812,7 +1028,11 @@ hdec_soon:
 	beqctr
 	RFI
 
-11:	mtspr	SPRN_HSRR0, r8
+11:
+BEGIN_FTR_SECTION
+	b	12b
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	mtspr	SPRN_HSRR0, r8
 	mtspr	SPRN_HSRR1, r7
 	ba	0x500
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 72c506505fa4..a107c9be0fb1 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -213,6 +213,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 		break;
 	case KVM_CAP_PPC_RMA:
 		r = 1;
+		/* PPC970 requires an RMA */
+		if (cpu_has_feature(CPU_FTR_ARCH_201))
+			r = 2;
 		break;
 #endif
 	default:
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index b44f5f803052..90039bc64119 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
 
 #define HPTE_LOCK_BIT 3
 
-static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
+DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
 static inline void __tlbie(unsigned long va, int psize, int ssize)
 {
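[Editor's note] This last hunk drops the `static` so that the PPC970
partition-switch assembly above can reach native_tlbie_lock through its
toc_tlbie_lock TOC entry.  The 24:/lwarx/stwcx. loops in that assembly are a
raw spinlock acquire that stores the per-CPU token read from
PACA_LOCK_TOKEN; a nonzero lock word marks the lock held and identifies the
holder.  A rough C equivalent of that protocol (illustration only, using
GCC/Clang atomic builtins rather than the kernel's primitives;
token_lock/token_unlock are made-up names):

    #include <stdint.h>

    static inline void token_lock(uint32_t *lock, uint32_t token)
    {
        uint32_t expected;

        for (;;) {
            /* spin until the lock word reads 0 (unlocked) */
            while (__atomic_load_n(lock, __ATOMIC_RELAXED) != 0)
                ;
            expected = 0;
            /* lwarx/stwcx. equivalent: claim the lock with our token */
            if (__atomic_compare_exchange_n(lock, &expected, token,
                                            0, __ATOMIC_ACQUIRE,
                                            __ATOMIC_RELAXED))
                return;
        }
    }

    static inline void token_unlock(uint32_t *lock)
    {
        /* the asm's "li r0,0; stw r0,0(r3)" release */
        __atomic_store_n(lock, 0, __ATOMIC_RELEASE);
    }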