-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h       2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h    1
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h        14
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h          2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c           1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c       200
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c              292
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c      104
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S    39
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c            5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c       110
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S   245
-rw-r--r--  arch/powerpc/kvm/powerpc.c                 10
13 files changed, 70 insertions(+), 955 deletions(-)
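This commit removes the PPC970 (ISA 2.01) support paths from HV KVM, leaving POWER7/POWER8 (ISA 2.06 and later) as the only supported hosts. The host gate that survives the series reduces to a single feature check; the sketch below mirrors the kvmppc_core_check_processor_compat_hv() hunk in book3s_hv.c further down and is illustrative rather than a verbatim copy:

    /* Sketch mirroring the post-patch compat check: HV KVM now requires
     * both hypervisor mode and ISA 2.06 (POWER7 or later); PPC970 hosts
     * are rejected outright.
     */
    static int kvmppc_core_check_processor_compat_hv(void)
    {
        if (!cpu_has_feature(CPU_FTR_HVMODE) ||
            !cpu_has_feature(CPU_FTR_ARCH_206))
            return -EIO;
        return 0;
    }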
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 6acf0c2a0f99..942c7b1678e3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -170,8 +170,6 @@ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
 			unsigned long *nb_ret);
 extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
 			unsigned long gpa, bool dirty);
-extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-			long pte_index, unsigned long pteh, unsigned long ptel);
 extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			long pte_index, unsigned long pteh, unsigned long ptel,
 			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index a37f1a4a5b0b..2d81e202bdcc 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -37,7 +37,6 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
-extern unsigned long kvm_rma_pages;
 #endif
 
 #define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 7cf94a5e8411..5686a429d4b7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -180,11 +180,6 @@ struct kvmppc_spapr_tce_table {
 	struct page *pages[0];
 };
 
-struct kvm_rma_info {
-	atomic_t use_count;
-	unsigned long base_pfn;
-};
-
 /* XICS components, defined in book3s_xics.c */
 struct kvmppc_xics;
 struct kvmppc_icp;
@@ -214,16 +209,9 @@ struct revmap_entry {
 #define KVMPPC_RMAP_PRESENT	0x100000000ul
 #define KVMPPC_RMAP_INDEX	0xfffffffful
 
-/* Low-order bits in memslot->arch.slot_phys[] */
-#define KVMPPC_PAGE_ORDER_MASK	0x1f
-#define KVMPPC_PAGE_NO_CACHE	HPTE_R_I	/* 0x20 */
-#define KVMPPC_PAGE_WRITETHRU	HPTE_R_W	/* 0x40 */
-#define KVMPPC_GOT_PAGE		0x80
-
 struct kvm_arch_memory_slot {
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long *rmap;
-	unsigned long *slot_phys;
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
@@ -242,14 +230,12 @@ struct kvm_arch {
 	struct kvm_rma_info *rma;
 	unsigned long vrma_slb_v;
 	int rma_setup_done;
-	int using_mmu_notifiers;
 	u32 hpt_order;
 	atomic_t vcpus_running;
 	u32 online_vcores;
 	unsigned long hpt_npte;
 	unsigned long hpt_mask;
 	atomic_t hpte_mod_interest;
-	spinlock_t slot_phys_lock;
 	cpumask_t need_tlb_flush;
 	int hpt_cma_alloc;
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a6dcdb6d13c1..46bf652c9169 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -170,8 +170,6 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 				unsigned long ioba, unsigned long tce);
 extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 				unsigned long ioba);
-extern struct kvm_rma_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
 extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9d7dede2847c..815212e9d7ba 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -489,7 +489,6 @@ int main(void)
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
 	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
-	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
 	DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
 	DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
 	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 311e4a38dd8b..534acb3c6c3d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -39,9 +39,6 @@
 
 #include "trace_hv.h"
 
-/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
-#define MAX_LPID_970	63
-
 /* Power architecture requires HPT is at least 256kB */
 #define PPC_MIN_HPT_ORDER	18
 
@@ -231,14 +228,9 @@ int kvmppc_mmu_hv_init(void)
 	if (!cpu_has_feature(CPU_FTR_HVMODE))
 		return -EINVAL;
 
-	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
-	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
-		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
-		rsvd_lpid = LPID_RSVD;
-	} else {
-		host_lpid = 0;			/* PPC970 */
-		rsvd_lpid = MAX_LPID_970;
-	}
+	/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
+	host_lpid = mfspr(SPRN_LPID);
+	rsvd_lpid = LPID_RSVD;
 
 	kvmppc_init_lpid(rsvd_lpid + 1);
 
@@ -261,130 +253,12 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 	kvmppc_set_msr(vcpu, msr);
 }
 
-/*
- * This is called to get a reference to a guest page if there isn't
- * one already in the memslot->arch.slot_phys[] array.
- */
-static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
-				struct kvm_memory_slot *memslot,
-				unsigned long psize)
-{
-	unsigned long start;
-	long np, err;
-	struct page *page, *hpage, *pages[1];
-	unsigned long s, pgsize;
-	unsigned long *physp;
-	unsigned int is_io, got, pgorder;
-	struct vm_area_struct *vma;
-	unsigned long pfn, i, npages;
-
-	physp = memslot->arch.slot_phys;
-	if (!physp)
-		return -EINVAL;
-	if (physp[gfn - memslot->base_gfn])
-		return 0;
-
-	is_io = 0;
-	got = 0;
-	page = NULL;
-	pgsize = psize;
-	err = -EINVAL;
-	start = gfn_to_hva_memslot(memslot, gfn);
-
-	/* Instantiate and get the page we want access to */
-	np = get_user_pages_fast(start, 1, 1, pages);
-	if (np != 1) {
-		/* Look up the vma for the page */
-		down_read(&current->mm->mmap_sem);
-		vma = find_vma(current->mm, start);
-		if (!vma || vma->vm_start > start ||
-		    start + psize > vma->vm_end ||
-		    !(vma->vm_flags & VM_PFNMAP))
-			goto up_err;
-		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
-		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
-		/* check alignment of pfn vs. requested page size */
-		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
-			goto up_err;
-		up_read(&current->mm->mmap_sem);
-
-	} else {
-		page = pages[0];
-		got = KVMPPC_GOT_PAGE;
-
-		/* See if this is a large page */
-		s = PAGE_SIZE;
-		if (PageHuge(page)) {
-			hpage = compound_head(page);
-			s <<= compound_order(hpage);
-			/* Get the whole large page if slot alignment is ok */
-			if (s > psize && slot_is_aligned(memslot, s) &&
-			    !(memslot->userspace_addr & (s - 1))) {
-				start &= ~(s - 1);
-				pgsize = s;
-				get_page(hpage);
-				put_page(page);
-				page = hpage;
-			}
-		}
-		if (s < psize)
-			goto out;
-		pfn = page_to_pfn(page);
-	}
-
-	npages = pgsize >> PAGE_SHIFT;
-	pgorder = __ilog2(npages);
-	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
-	spin_lock(&kvm->arch.slot_phys_lock);
-	for (i = 0; i < npages; ++i) {
-		if (!physp[i]) {
-			physp[i] = ((pfn + i) << PAGE_SHIFT) +
-				got + is_io + pgorder;
-			got = 0;
-		}
-	}
-	spin_unlock(&kvm->arch.slot_phys_lock);
-	err = 0;
-
- out:
-	if (got)
-		put_page(page);
-	return err;
-
- up_err:
-	up_read(&current->mm->mmap_sem);
-	return err;
-}
-
 long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 				long pte_index, unsigned long pteh,
 				unsigned long ptel, unsigned long *pte_idx_ret)
 {
-	unsigned long psize, gpa, gfn;
-	struct kvm_memory_slot *memslot;
 	long ret;
 
-	if (kvm->arch.using_mmu_notifiers)
-		goto do_insert;
-
-	psize = hpte_page_size(pteh, ptel);
-	if (!psize)
-		return H_PARAMETER;
-
-	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
-
-	/* Find the memslot (if any) for this address */
-	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
-	gfn = gpa >> PAGE_SHIFT;
-	memslot = gfn_to_memslot(kvm, gfn);
-	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
-		if (!slot_is_aligned(memslot, psize))
-			return H_PARAMETER;
-		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
-			return H_PARAMETER;
-	}
-
- do_insert:
 	/* Protect linux PTE lookup from page table destruction */
 	rcu_read_lock_sched();	/* this disables preemption too */
 	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
@@ -399,19 +273,6 @@ long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 }
 
-/*
- * We come here on a H_ENTER call from the guest when we are not
- * using mmu notifiers and we don't have the requested page pinned
- * already.
- */
-long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-			     long pte_index, unsigned long pteh,
-			     unsigned long ptel)
-{
-	return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
-					  pteh, ptel, &vcpu->arch.gpr[4]);
-}
-
 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
 							gva_t eaddr)
 {
@@ -496,7 +357,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
 
 	/* Storage key permission check for POWER7 */
-	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
+	if (data && virtmode) {
 		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
 		if (amrfield & 1)
 			gpte->may_read = 0;
@@ -631,9 +492,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
 					      dsisr & DSISR_ISSTORE);
 
-	if (!kvm->arch.using_mmu_notifiers)
-		return -EFAULT;		/* should never get here */
-
 	/*
 	 * This should never happen, because of the slot_is_aligned()
 	 * check in kvmppc_do_h_enter().
@@ -902,8 +760,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
 		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
 		    hpte_rpn(ptel, psize) == gfn) {
-			if (kvm->arch.using_mmu_notifiers)
-				hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
+			hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
 			kvmppc_invalidate_hpte(kvm, hptep, i);
 			/* Harvest R and C */
 			rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
@@ -921,15 +778,13 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
 {
-	if (kvm->arch.using_mmu_notifiers)
-		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 	return 0;
 }
 
 int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	if (kvm->arch.using_mmu_notifiers)
-		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
+	kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
 	return 0;
 }
 
@@ -1011,8 +866,6 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return 0;
 	return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp);
 }
 
@@ -1049,15 +902,11 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return 0;
 	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
 }
 
 void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return;
 	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 }
 
@@ -1216,35 +1065,17 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	struct page *page, *pages[1];
 	int npages;
 	unsigned long hva, offset;
-	unsigned long pa;
-	unsigned long *physp;
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	memslot = gfn_to_memslot(kvm, gfn);
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		goto err;
-	if (!kvm->arch.using_mmu_notifiers) {
-		physp = memslot->arch.slot_phys;
-		if (!physp)
-			goto err;
-		physp += gfn - memslot->base_gfn;
-		pa = *physp;
-		if (!pa) {
-			if (kvmppc_get_guest_page(kvm, gfn, memslot,
-						  PAGE_SIZE) < 0)
-				goto err;
-			pa = *physp;
-		}
-		page = pfn_to_page(pa >> PAGE_SHIFT);
-		get_page(page);
-	} else {
-		hva = gfn_to_hva_memslot(memslot, gfn);
-		npages = get_user_pages_fast(hva, 1, 1, pages);
-		if (npages < 1)
-			goto err;
-		page = pages[0];
-	}
+	hva = gfn_to_hva_memslot(memslot, gfn);
+	npages = get_user_pages_fast(hva, 1, 1, pages);
+	if (npages < 1)
+		goto err;
+	page = pages[0];
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 
 	offset = gpa & (PAGE_SIZE - 1);
@@ -1268,7 +1099,7 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
 
 	put_page(page);
 
-	if (!dirty || !kvm->arch.using_mmu_notifiers)
+	if (!dirty)
 		return;
 
 	/* We need to mark this page dirty in the rmap chain */
@@ -1668,10 +1499,7 @@ void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
 
-	if (cpu_has_feature(CPU_FTR_ARCH_206))
-		vcpu->arch.slb_nr = 32;		/* POWER7 */
-	else
-		vcpu->arch.slb_nr = 64;
+	vcpu->arch.slb_nr = 32;			/* POWER7/POWER8 */
 
 	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
 	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
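With the slot_phys[] side channel gone, the kvmppc_pin_guest_page() hunk above always pins guest pages through the Linux MM. A minimal sketch of the surviving path (the helper name here is hypothetical; error handling is trimmed):

    /* Hypothetical helper illustrating the surviving pin path:
     * translate gfn to a host virtual address, then pin one writable page.
     */
    static struct page *pin_guest_page_sketch(struct kvm_memory_slot *memslot,
                                              unsigned long gfn)
    {
        struct page *pages[1];
        unsigned long hva = gfn_to_hva_memslot(memslot, gfn);

        if (get_user_pages_fast(hva, 1, 1, pages) < 1)
            return NULL;        /* caller treats this as an error */
        return pages[0];        /* released later with put_page() */
    }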
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 325ed9443e43..1ee4e9e786cc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -205,9 +205,6 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
 	if (arch_compat) {
-		if (!cpu_has_feature(CPU_FTR_ARCH_206))
-			return -EINVAL;	/* 970 has no compat mode support */
-
 		switch (arch_compat) {
 		case PVR_ARCH_205:
 			/*
@@ -622,14 +619,6 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		return RESUME_HOST;
 
 	switch (req) {
-	case H_ENTER:
-		idx = srcu_read_lock(&vcpu->kvm->srcu);
-		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
-					      kvmppc_get_gpr(vcpu, 5),
-					      kvmppc_get_gpr(vcpu, 6),
-					      kvmppc_get_gpr(vcpu, 7));
-		srcu_read_unlock(&vcpu->kvm->srcu, idx);
-		break;
 	case H_CEDE:
 		break;
 	case H_PROD:
@@ -2003,7 +1992,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	/* On the first time here, set up HTAB and VRMA or RMA */
+	/* On the first time here, set up HTAB and VRMA */
 	if (!vcpu->kvm->arch.rma_setup_done) {
 		r = kvmppc_hv_setup_htab_rma(vcpu);
 		if (r)
@@ -2040,98 +2029,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	return r;
 }
 
-
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
-   Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
-	switch (rma_size) {
-	case 32ul << 20:	/* 32 MB */
-		if (cpu_has_feature(CPU_FTR_ARCH_206))
-			return 8;	/* only supported on POWER7 */
-		return -1;
-	case 64ul << 20:	/* 64 MB */
-		return 3;
-	case 128ul << 20:	/* 128 MB */
-		return 7;
-	case 256ul << 20:	/* 256 MB */
-		return 4;
-	case 1ul << 30:		/* 1 GB */
-		return 2;
-	case 16ul << 30:	/* 16 GB */
-		return 1;
-	case 256ul << 30:	/* 256 GB */
-		return 0;
-	default:
-		return -1;
-	}
-}
-
-static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	struct page *page;
-	struct kvm_rma_info *ri = vma->vm_file->private_data;
-
-	if (vmf->pgoff >= kvm_rma_pages)
-		return VM_FAULT_SIGBUS;
-
-	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
-	get_page(page);
-	vmf->page = page;
-	return 0;
-}
-
-static const struct vm_operations_struct kvm_rma_vm_ops = {
-	.fault = kvm_rma_fault,
-};
-
-static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = &kvm_rma_vm_ops;
-	return 0;
-}
-
-static int kvm_rma_release(struct inode *inode, struct file *filp)
-{
-	struct kvm_rma_info *ri = filp->private_data;
-
-	kvm_release_rma(ri);
-	return 0;
-}
-
-static const struct file_operations kvm_rma_fops = {
-	.mmap		= kvm_rma_mmap,
-	.release	= kvm_rma_release,
-};
-
-static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
-				      struct kvm_allocate_rma *ret)
-{
-	long fd;
-	struct kvm_rma_info *ri;
-	/*
-	 * Only do this on PPC970 in HV mode
-	 */
-	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
-	    !cpu_has_feature(CPU_FTR_ARCH_201))
-		return -EINVAL;
-
-	if (!kvm_rma_pages)
-		return -EINVAL;
-
-	ri = kvm_alloc_rma();
-	if (!ri)
-		return -ENOMEM;
-
-	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
-	if (fd < 0)
-		kvm_release_rma(ri);
-
-	ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
-	return fd;
-}
-
 static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
 				     int linux_psize)
 {
@@ -2210,26 +2107,6 @@ out:
 	return r;
 }
 
-static void unpin_slot(struct kvm_memory_slot *memslot)
-{
-	unsigned long *physp;
-	unsigned long j, npages, pfn;
-	struct page *page;
-
-	physp = memslot->arch.slot_phys;
-	npages = memslot->npages;
-	if (!physp)
-		return;
-	for (j = 0; j < npages; j++) {
-		if (!(physp[j] & KVMPPC_GOT_PAGE))
-			continue;
-		pfn = physp[j] >> PAGE_SHIFT;
-		page = pfn_to_page(pfn);
-		SetPageDirty(page);
-		put_page(page);
-	}
-}
-
 static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
 			struct kvm_memory_slot *dont)
 {
@@ -2237,11 +2114,6 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
 		vfree(free->arch.rmap);
 		free->arch.rmap = NULL;
 	}
-	if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
-		unpin_slot(free);
-		vfree(free->arch.slot_phys);
-		free->arch.slot_phys = NULL;
-	}
 }
 
 static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
@@ -2250,7 +2122,6 @@ static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
 	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
 	if (!slot->arch.rmap)
 		return -ENOMEM;
-	slot->arch.slot_phys = NULL;
 
 	return 0;
 }
@@ -2259,17 +2130,6 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
 					struct kvm_memory_slot *memslot,
 					struct kvm_userspace_memory_region *mem)
 {
-	unsigned long *phys;
-
-	/* Allocate a slot_phys array if needed */
-	phys = memslot->arch.slot_phys;
-	if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
-		phys = vzalloc(memslot->npages * sizeof(unsigned long));
-		if (!phys)
-			return -ENOMEM;
-		memslot->arch.slot_phys = phys;
-	}
-
 	return 0;
 }
 
@@ -2327,17 +2187,11 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_rma_info *ri = NULL;
 	unsigned long hva;
 	struct kvm_memory_slot *memslot;
 	struct vm_area_struct *vma;
 	unsigned long lpcr = 0, senc;
-	unsigned long lpcr_mask = 0;
 	unsigned long psize, porder;
-	unsigned long rma_size;
-	unsigned long rmls;
-	unsigned long *physp;
-	unsigned long i, npages;
 	int srcu_idx;
 
 	mutex_lock(&kvm->lock);
@@ -2372,88 +2226,25 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	psize = vma_kernel_pagesize(vma);
 	porder = __ilog2(psize);
 
-	/* Is this one of our preallocated RMAs? */
-	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
-	    hva == vma->vm_start)
-		ri = vma->vm_file->private_data;
-
 	up_read(&current->mm->mmap_sem);
 
-	if (!ri) {
-		/* On POWER7, use VRMA; on PPC970, give up */
-		err = -EPERM;
-		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
-			pr_err("KVM: CPU requires an RMO\n");
-			goto out_srcu;
-		}
-
-		/* We can handle 4k, 64k or 16M pages in the VRMA */
-		err = -EINVAL;
-		if (!(psize == 0x1000 || psize == 0x10000 ||
-		      psize == 0x1000000))
-			goto out_srcu;
-
-		/* Update VRMASD field in the LPCR */
-		senc = slb_pgsize_encoding(psize);
-		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
-			(VRMA_VSID << SLB_VSID_SHIFT_1T);
-		lpcr_mask = LPCR_VRMASD;
-		/* the -4 is to account for senc values starting at 0x10 */
-		lpcr = senc << (LPCR_VRMASD_SH - 4);
+	/* We can handle 4k, 64k or 16M pages in the VRMA */
+	err = -EINVAL;
+	if (!(psize == 0x1000 || psize == 0x10000 ||
+	      psize == 0x1000000))
+		goto out_srcu;
 
-		/* Create HPTEs in the hash page table for the VRMA */
-		kvmppc_map_vrma(vcpu, memslot, porder);
+	/* Update VRMASD field in the LPCR */
+	senc = slb_pgsize_encoding(psize);
+	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
+		(VRMA_VSID << SLB_VSID_SHIFT_1T);
+	/* the -4 is to account for senc values starting at 0x10 */
+	lpcr = senc << (LPCR_VRMASD_SH - 4);
 
-	} else {
-		/* Set up to use an RMO region */
-		rma_size = kvm_rma_pages;
-		if (rma_size > memslot->npages)
-			rma_size = memslot->npages;
-		rma_size <<= PAGE_SHIFT;
-		rmls = lpcr_rmls(rma_size);
-		err = -EINVAL;
-		if ((long)rmls < 0) {
-			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
-			goto out_srcu;
-		}
-		atomic_inc(&ri->use_count);
-		kvm->arch.rma = ri;
-
-		/* Update LPCR and RMOR */
-		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
-			/* PPC970; insert RMLS value (split field) in HID4 */
-			lpcr_mask = (1ul << HID4_RMLS0_SH) |
-				(3ul << HID4_RMLS2_SH) | HID4_RMOR;
-			lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
-				((rmls & 3) << HID4_RMLS2_SH);
-			/* RMOR is also in HID4 */
-			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
-				<< HID4_RMOR_SH;
-		} else {
-			/* POWER7 */
-			lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
-			lpcr = rmls << LPCR_RMLS_SH;
-			kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
-		}
-		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
-			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
-
-		/* Initialize phys addrs of pages in RMO */
-		npages = kvm_rma_pages;
-		porder = __ilog2(npages);
-		physp = memslot->arch.slot_phys;
-		if (physp) {
-			if (npages > memslot->npages)
-				npages = memslot->npages;
-			spin_lock(&kvm->arch.slot_phys_lock);
-			for (i = 0; i < npages; ++i)
-				physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
-					porder;
-			spin_unlock(&kvm->arch.slot_phys_lock);
-		}
-	}
+	/* Create HPTEs in the hash page table for the VRMA */
+	kvmppc_map_vrma(vcpu, memslot, porder);
 
-	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+	kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
 
 	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
 	smp_wmb();
@@ -2492,35 +2283,21 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
 	       sizeof(kvm->arch.enabled_hcalls));
 
-	kvm->arch.rma = NULL;
-
 	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
 
-	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
-		/* PPC970; HID4 is effectively the LPCR */
-		kvm->arch.host_lpid = 0;
-		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
-		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
-		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
-			((lpid & 0xf) << HID4_LPID5_SH);
-	} else {
-		/* POWER7; init LPCR for virtual RMA mode */
-		kvm->arch.host_lpid = mfspr(SPRN_LPID);
-		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
-		lpcr &= LPCR_PECE | LPCR_LPES;
-		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
-			LPCR_VPM0 | LPCR_VPM1;
-		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
-			(VRMA_VSID << SLB_VSID_SHIFT_1T);
-		/* On POWER8 turn on online bit to enable PURR/SPURR */
-		if (cpu_has_feature(CPU_FTR_ARCH_207S))
-			lpcr |= LPCR_ONL;
-	}
+	/* Init LPCR for virtual RMA mode */
+	kvm->arch.host_lpid = mfspr(SPRN_LPID);
+	kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+	lpcr &= LPCR_PECE | LPCR_LPES;
+	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+		LPCR_VPM0 | LPCR_VPM1;
+	kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
+		(VRMA_VSID << SLB_VSID_SHIFT_1T);
+	/* On POWER8 turn on online bit to enable PURR/SPURR */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		lpcr |= LPCR_ONL;
 	kvm->arch.lpcr = lpcr;
 
-	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
-	spin_lock_init(&kvm->arch.slot_phys_lock);
-
 	/*
 	 * Track that we now have a HV mode VM active. This blocks secondary
 	 * CPU threads from coming online.
@@ -2550,10 +2327,6 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 	kvm_hv_vm_deactivated();
 
 	kvmppc_free_vcores(kvm);
-	if (kvm->arch.rma) {
-		kvm_release_rma(kvm->arch.rma);
-		kvm->arch.rma = NULL;
-	}
 
 	kvmppc_free_hpt(kvm);
 }
@@ -2579,7 +2352,8 @@ static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
 
 static int kvmppc_core_check_processor_compat_hv(void)
 {
-	if (!cpu_has_feature(CPU_FTR_HVMODE))
+	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+	    !cpu_has_feature(CPU_FTR_ARCH_206))
 		return -EIO;
 	return 0;
 }
@@ -2593,16 +2367,6 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
 
 	switch (ioctl) {
 
-	case KVM_ALLOCATE_RMA: {
-		struct kvm_allocate_rma rma;
-		struct kvm *kvm = filp->private_data;
-
-		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
-		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
-			r = -EFAULT;
-		break;
-	}
-
 	case KVM_PPC_ALLOCATE_HTAB: {
 		u32 htab_order;
 
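The hunk above also retires the KVM_ALLOCATE_RMA vm ioctl. For context, a hedged sketch of how PPC970-era userspace consumed it, reconstructed from the removed kvm_rma_fops above (the exact QEMU usage may have differed, and this assumes the uapi header still defines KVM_ALLOCATE_RMA): the ioctl returned a file descriptor whose pages backed the guest's real-mode area via mmap(). After this patch the ioctl falls through to the default case and fails.

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Sketch of the retired userspace flow; vm_fd is an existing KVM VM fd. */
    static void *map_rma_legacy(int vm_fd)
    {
        struct kvm_allocate_rma rma;
        int rma_fd = ioctl(vm_fd, KVM_ALLOCATE_RMA, &rma);

        if (rma_fd < 0)
            return NULL;    /* always the case after this patch */
        return mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, rma_fd, 0);
    }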
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 4fdc27c80f4c..1786bf80bf00 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -33,95 +33,9 @@
  * By default we reserve 5% of memory for hash pagetable allocation.
  */
 static unsigned long kvm_cma_resv_ratio = 5;
-/*
- * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
- * Each RMA has to be physically contiguous and of a size that the
- * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
- * and other larger sizes. Since we are unlikely to be allocate that
- * much physically contiguous memory after the system is up and running,
- * we preallocate a set of RMAs in early boot using CMA.
- * should be power of 2.
- */
-unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
-EXPORT_SYMBOL_GPL(kvm_rma_pages);
 
 static struct cma *kvm_cma;
 
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
-   Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
-	switch (rma_size) {
-	case 32ul << 20:	/* 32 MB */
-		if (cpu_has_feature(CPU_FTR_ARCH_206))
-			return 8;	/* only supported on POWER7 */
-		return -1;
-	case 64ul << 20:	/* 64 MB */
-		return 3;
-	case 128ul << 20:	/* 128 MB */
-		return 7;
-	case 256ul << 20:	/* 256 MB */
-		return 4;
-	case 1ul << 30:		/* 1 GB */
-		return 2;
-	case 16ul << 30:	/* 16 GB */
-		return 1;
-	case 256ul << 30:	/* 256 GB */
-		return 0;
-	default:
-		return -1;
-	}
-}
-
-static int __init early_parse_rma_size(char *p)
-{
-	unsigned long kvm_rma_size;
-
-	pr_debug("%s(%s)\n", __func__, p);
-	if (!p)
-		return -EINVAL;
-	kvm_rma_size = memparse(p, &p);
-	/*
-	 * Check that the requested size is one supported in hardware
-	 */
-	if (lpcr_rmls(kvm_rma_size) < 0) {
-		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
-		return -EINVAL;
-	}
-	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
-	return 0;
-}
-early_param("kvm_rma_size", early_parse_rma_size);
-
-struct kvm_rma_info *kvm_alloc_rma()
-{
-	struct page *page;
-	struct kvm_rma_info *ri;
-
-	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
-	if (!ri)
-		return NULL;
-	page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
-	if (!page)
-		goto err_out;
-	atomic_set(&ri->use_count, 1);
-	ri->base_pfn = page_to_pfn(page);
-	return ri;
-err_out:
-	kfree(ri);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(kvm_alloc_rma);
-
-void kvm_release_rma(struct kvm_rma_info *ri)
-{
-	if (atomic_dec_and_test(&ri->use_count)) {
-		cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
-		kfree(ri);
-	}
-}
-EXPORT_SYMBOL_GPL(kvm_release_rma);
-
 static int __init early_parse_kvm_cma_resv(char *p)
 {
 	pr_debug("%s(%s)\n", __func__, p);
@@ -133,14 +47,9 @@ early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
 
 struct page *kvm_alloc_hpt(unsigned long nr_pages)
 {
-	unsigned long align_pages = HPT_ALIGN_PAGES;
-
 	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
-	/* Old CPUs require HPT aligned on a multiple of its size */
-	if (!cpu_has_feature(CPU_FTR_ARCH_206))
-		align_pages = nr_pages;
-	return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
+	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
 
@@ -181,16 +90,7 @@ void __init kvm_cma_reserve(void)
 	if (selected_size) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)selected_size / SZ_1M);
-		/*
-		 * Old CPUs require HPT aligned on a multiple of its size. So for them
-		 * make the alignment as max size we could request.
-		 */
-		if (!cpu_has_feature(CPU_FTR_ARCH_206))
-			align_size = __rounddown_pow_of_two(selected_size);
-		else
-			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
-
-		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
+		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
 		cma_declare_contiguous(0, selected_size, 0, align_size,
 			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
 	}
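With the PPC970 alignment quirk gone, the CMA pool above serves only hash page tables, always aligned to HPT_ALIGN_PAGES. For reference, a simplified sketch of the allocation call site in kvmppc_alloc_hpt() (book3s_64_mmu_hv.c, not part of this hunk; reconstructed from the surrounding source, so treat it as an approximation):

    /* Approximate call site: the HPT spans 2^order bytes, i.e.
     * 2^(order - PAGE_SHIFT) pages, carved out of the kvm_cma pool.
     */
    page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
    if (page) {
        hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
        kvm->arch.hpt_cma_alloc = 1;    /* remember to cma_release() later */
    }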
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 731be7478b27..36540a99d178 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -52,10 +52,8 @@ _GLOBAL(__kvmppc_vcore_entry)
 	std	r3, _CCR(r1)
 
 	/* Save host DSCR */
-BEGIN_FTR_SECTION
 	mfspr	r3, SPRN_DSCR
 	std	r3, HSTATE_DSCR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 BEGIN_FTR_SECTION
 	/* Save host DABR */
@@ -84,11 +82,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mfspr	r7, SPRN_MMCR0		/* save MMCR0 */
 	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable interrupts */
 	mfspr	r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
-	/* On P7, clear MMCRA in order to disable SDAR updates */
+	/* Clear MMCRA in order to disable SDAR updates */
 	li	r5, 0
 	mtspr	SPRN_MMCRA, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	isync
 	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
 	lbz	r5, LPPACA_PMCINUSE(r3)
@@ -113,20 +109,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mfspr	r7, SPRN_PMC4
 	mfspr	r8, SPRN_PMC5
 	mfspr	r9, SPRN_PMC6
-BEGIN_FTR_SECTION
-	mfspr	r10, SPRN_PMC7
-	mfspr	r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	stw	r3, HSTATE_PMC(r13)
 	stw	r5, HSTATE_PMC + 4(r13)
 	stw	r6, HSTATE_PMC + 8(r13)
 	stw	r7, HSTATE_PMC + 12(r13)
 	stw	r8, HSTATE_PMC + 16(r13)
 	stw	r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
-	stw	r10, HSTATE_PMC + 24(r13)
-	stw	r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 31:
 
 	/*
@@ -140,31 +128,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	add	r8,r8,r7
 	std	r8,HSTATE_DECEXP(r13)
 
-#ifdef CONFIG_SMP
-	/*
-	 * On PPC970, if the guest vcpu has an external interrupt pending,
-	 * send ourselves an IPI so as to interrupt the guest once it
-	 * enables interrupts.  (It must have interrupts disabled,
-	 * otherwise we would already have delivered the interrupt.)
-	 *
-	 * XXX If this is a UP build, smp_send_reschedule is not available,
-	 * so the interrupt will be delayed until the next time the vcpu
-	 * enters the guest with interrupts enabled.
-	 */
-BEGIN_FTR_SECTION
-	ld	r4, HSTATE_KVM_VCPU(r13)
-	ld	r0, VCPU_PENDING_EXC(r4)
-	li	r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
-	oris	r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
-	and.	r0, r0, r7
-	beq	32f
-	lhz	r3, PACAPACAINDEX(r13)
-	bl	smp_send_reschedule
-	nop
-32:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-#endif /* CONFIG_SMP */
-
 	/* Jump to partition switch code */
 	bl	kvmppc_hv_entry_trampoline
 	nop
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index d562c8e2bc30..60081bd75847 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -138,8 +138,5 @@ out:
 
 long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
 {
-	if (cpu_has_feature(CPU_FTR_ARCH_206))
-		return kvmppc_realmode_mc_power7(vcpu);
-
-	return 0;
+	return kvmppc_realmode_mc_power7(vcpu);
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 411720f59643..510bdfbc4073 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -45,16 +45,12 @@ static int global_invalidates(struct kvm *kvm, unsigned long flags)
  * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
  * we can use tlbiel as long as we mark all other physical
  * cores as potentially having stale TLB entries for this lpid.
- * If we're not using MMU notifiers, we never take pages away
- * from the guest, so we can use tlbiel if requested.
  * Otherwise, don't use tlbiel.
  */
 	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
 		global = 0;
-	else if (kvm->arch.using_mmu_notifiers)
-		global = 1;
 	else
-		global = !(flags & H_LOCAL);
+		global = 1;
 
 	if (!global) {
 		/* any other core might now have stale TLB entries... */
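What follows the "if (!global)" branch above in global_invalidates() is the local-flush bookkeeping, sketched here from the surrounding source (simplified, not shown in this hunk):

    /* tlbiel only flushed this core: mark every other physical core as
     * possibly holding stale translations for this LPID; they flush on
     * their next guest entry.
     */
    cpumask_setall(&kvm->arch.need_tlb_flush);
    cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
                      &kvm->arch.need_tlb_flush);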
@@ -170,7 +166,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	struct revmap_entry *rev;
 	unsigned long g_ptel;
 	struct kvm_memory_slot *memslot;
-	unsigned long *physp, pte_size;
+	unsigned long pte_size;
 	unsigned long is_io;
 	unsigned long *rmap;
 	pte_t pte;
@@ -198,9 +194,6 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	is_io = ~0ul;
 	rmap = NULL;
 	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
-		/* PPC970 can't do emulated MMIO */
-		if (!cpu_has_feature(CPU_FTR_ARCH_206))
-			return H_PARAMETER;
 		/* Emulated MMIO - mark this with key=31 */
 		pteh |= HPTE_V_ABSENT;
 		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
@@ -213,37 +206,20 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	slot_fn = gfn - memslot->base_gfn;
 	rmap = &memslot->arch.rmap[slot_fn];
 
-	if (!kvm->arch.using_mmu_notifiers) {
-		physp = memslot->arch.slot_phys;
-		if (!physp)
-			return H_PARAMETER;
-		physp += slot_fn;
-		if (realmode)
-			physp = real_vmalloc_addr(physp);
-		pa = *physp;
-		if (!pa)
-			return H_TOO_HARD;
-		is_io = pa & (HPTE_R_I | HPTE_R_W);
-		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
-		pa &= PAGE_MASK;
-		pa |= gpa & ~PAGE_MASK;
-	} else {
-		/* Translate to host virtual address */
-		hva = __gfn_to_hva_memslot(memslot, gfn);
-
-		/* Look up the Linux PTE for the backing page */
-		pte_size = psize;
-		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
-						  &pte_size);
-		if (pte_present(pte) && !pte_numa(pte)) {
-			if (writing && !pte_write(pte))
-				/* make the actual HPTE be read-only */
-				ptel = hpte_make_readonly(ptel);
-			is_io = hpte_cache_bits(pte_val(pte));
-			pa = pte_pfn(pte) << PAGE_SHIFT;
-			pa |= hva & (pte_size - 1);
-			pa |= gpa & ~PAGE_MASK;
-		}
-	}
+	/* Translate to host virtual address */
+	hva = __gfn_to_hva_memslot(memslot, gfn);
+
+	/* Look up the Linux PTE for the backing page */
+	pte_size = psize;
+	pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
+	if (pte_present(pte) && !pte_numa(pte)) {
+		if (writing && !pte_write(pte))
+			/* make the actual HPTE be read-only */
+			ptel = hpte_make_readonly(ptel);
+		is_io = hpte_cache_bits(pte_val(pte));
+		pa = pte_pfn(pte) << PAGE_SHIFT;
+		pa |= hva & (pte_size - 1);
+		pa |= gpa & ~PAGE_MASK;
+	}
 
 	if (pte_size < psize)
@@ -337,8 +313,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	rmap = real_vmalloc_addr(rmap);
 	lock_rmap(rmap);
 	/* Check for pending invalidations under the rmap chain lock */
-	if (kvm->arch.using_mmu_notifiers &&
-	    mmu_notifier_retry(kvm, mmu_seq)) {
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
 		/* inval in progress, write a non-present HPTE */
 		pteh |= HPTE_V_ABSENT;
 		pteh &= ~HPTE_V_VALID;
@@ -395,61 +370,11 @@ static inline int try_lock_tlbie(unsigned int *lock)
 	return old == 0;
 }
 
-/*
- * tlbie/tlbiel is a bit different on the PPC970 compared to later
- * processors such as POWER7; the large page bit is in the instruction
- * not RB, and the top 16 bits and the bottom 12 bits of the VA
- * in RB must be 0.
- */
-static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
-			  long npages, int global, bool need_sync)
-{
-	long i;
-
-	if (global) {
-		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
-			cpu_relax();
-		if (need_sync)
-			asm volatile("ptesync" : : : "memory");
-		for (i = 0; i < npages; ++i) {
-			unsigned long rb = rbvalues[i];
-
-			if (rb & 1)		/* large page */
-				asm volatile("tlbie %0,1" : :
-					     "r" (rb & 0x0000fffffffff000ul));
-			else
-				asm volatile("tlbie %0,0" : :
-					     "r" (rb & 0x0000fffffffff000ul));
-		}
-		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
-		kvm->arch.tlbie_lock = 0;
-	} else {
-		if (need_sync)
-			asm volatile("ptesync" : : : "memory");
-		for (i = 0; i < npages; ++i) {
-			unsigned long rb = rbvalues[i];
-
-			if (rb & 1)		/* large page */
-				asm volatile("tlbiel %0,1" : :
-					     "r" (rb & 0x0000fffffffff000ul));
-			else
-				asm volatile("tlbiel %0,0" : :
-					     "r" (rb & 0x0000fffffffff000ul));
-		}
-		asm volatile("ptesync" : : : "memory");
-	}
-}
-
 static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 		      long npages, int global, bool need_sync)
 {
 	long i;
 
-	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
-		/* PPC970 tlbie instruction is a bit different */
-		do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
-		return;
-	}
 	if (global) {
 		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
 			cpu_relax();
@@ -677,8 +602,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	 */
 	pte = be64_to_cpu(hpte[1]);
 	r = (pte & ~mask) | bits;
-	if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers &&
-	    !hpte_is_writable(pte))
+	if (hpte_is_writable(r) && !hpte_is_writable(pte))
 		r = hpte_make_readonly(r);
 	/* If the PTE is changing, invalidate it first */
 	if (r != pte) {
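The mmu_notifier_retry() check made unconditional above is one half of KVM's standard race protocol with the MMU notifiers; the other half samples the sequence count before the page lookup. A condensed sketch of the full pattern as used by kvmppc_do_h_enter() (simplified, not verbatim kernel code):

    /* 1. Sample the notifier sequence count before translating/pinning. */
    mmu_seq = kvm->mmu_notifier_seq;
    smp_rmb();

    /* 2. Look up the Linux PTE / compute the real address (may race). */

    /* 3. Under the rmap lock, detect any invalidation that ran meanwhile. */
    lock_rmap(rmap);
    if (mmu_notifier_retry(kvm, mmu_seq)) {
        /* inval in progress: insert a non-present HPTE instead */
        pteh |= HPTE_V_ABSENT;
        pteh &= ~HPTE_V_VALID;
    }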
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index edb2ccdbb2ba..c0f9e68c5db2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -94,20 +94,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
 	lwz	r6, HSTATE_PMC + 12(r13)
 	lwz	r8, HSTATE_PMC + 16(r13)
 	lwz	r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
-	lwz	r10, HSTATE_PMC + 24(r13)
-	lwz	r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr	SPRN_PMC1, r3
 	mtspr	SPRN_PMC2, r4
 	mtspr	SPRN_PMC3, r5
 	mtspr	SPRN_PMC4, r6
 	mtspr	SPRN_PMC5, r8
 	mtspr	SPRN_PMC6, r9
-BEGIN_FTR_SECTION
-	mtspr	SPRN_PMC7, r10
-	mtspr	SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, HSTATE_MMCR(r13)
 	ld	r4, HSTATE_MMCR + 8(r13)
 	ld	r5, HSTATE_MMCR + 16(r13)
@@ -153,11 +145,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
 	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-BEGIN_FTR_SECTION
 	beq	11f
 	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
 	beq	cr2, 14f			/* HMI check */
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* RFI into the highmem handler, or branch to interrupt handler */
 	mfmsr	r6
@@ -166,7 +156,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mtmsrd	r6, 1			/* Clear RI in MSR */
 	mtsrr0	r8
 	mtsrr1	r7
-	beqa	0x500			/* external interrupt (PPC970) */
 	beq	cr1, 13f		/* machine check */
 	RFI
 
@@ -374,11 +363,8 @@ kvmppc_hv_entry:
 	slbia
 	ptesync
 
-BEGIN_FTR_SECTION
-	b	30f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	/*
-	 * POWER7 host -> guest partition switch code.
+	 * POWER7/POWER8 host -> guest partition switch code.
 	 * We don't have to lock against concurrent tlbies,
 	 * but we do have to coordinate across hardware threads.
 	 */
@@ -486,97 +472,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	cmpwi	r3,512		/* 1 microsecond */
 	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
 	blt	hdec_soon
-	b	31f
-
-	/*
-	 * PPC970 host -> guest partition switch code.
-	 * We have to lock against concurrent tlbies,
-	 * using native_tlbie_lock to lock against host tlbies
-	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
-	 * We also have to invalidate the TLB since its
-	 * entries aren't tagged with the LPID.
-	 */
-30:	ld	r5,HSTATE_KVM_VCORE(r13)
-	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
-
-	/* first take native_tlbie_lock */
-	.section ".toc","aw"
-toc_tlbie_lock:
-	.tc	native_tlbie_lock[TC],native_tlbie_lock
-	.previous
-	ld	r3,toc_tlbie_lock@toc(r2)
-#ifdef __BIG_ENDIAN__
-	lwz	r8,PACA_LOCK_TOKEN(r13)
-#else
-	lwz	r8,PACAPACAINDEX(r13)
-#endif
-24:	lwarx	r0,0,r3
-	cmpwi	r0,0
-	bne	24b
-	stwcx.	r8,0,r3
-	bne	24b
-	isync
-
-	ld	r5,HSTATE_KVM_VCORE(r13)
-	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
-	li	r0,0x18f
-	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
-	or	r0,r7,r0
-	ptesync
-	sync
-	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
-	isync
-	li	r0,0
-	stw	r0,0(r3)		/* drop native_tlbie_lock */
-
-	/* invalidate the whole TLB */
-	li	r0,256
-	mtctr	r0
-	li	r6,0
-25:	tlbiel	r6
-	addi	r6,r6,0x1000
-	bdnz	25b
-	ptesync
 
-	/* Take the guest's tlbie_lock */
-	addi	r3,r9,KVM_TLBIE_LOCK
-24:	lwarx	r0,0,r3
-	cmpwi	r0,0
-	bne	24b
-	stwcx.	r8,0,r3
-	bne	24b
-	isync
-	ld	r6,KVM_SDR1(r9)
-	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
-
-	/* Set up HID4 with the guest's LPID etc. */
-	sync
-	mtspr	SPRN_HID4,r7
-	isync
-
-	/* drop the guest's tlbie_lock */
-	li	r0,0
-	stw	r0,0(r3)
-
-	/* Check if HDEC expires soon */
-	mfspr	r3,SPRN_HDEC
-	cmpwi	r3,10
-	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
-	blt	hdec_soon
-
-	/* Enable HDEC interrupts */
-	mfspr	r0,SPRN_HID0
-	li	r3,1
-	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
-	sync
-	mtspr	SPRN_HID0,r0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-	mfspr	r0,SPRN_HID0
-31:
 	/* Do we have a guest vcpu to run? */
 	cmpdi	r4, 0
 	beq	kvmppc_primary_no_guest
@@ -606,7 +502,6 @@ kvmppc_got_guest:
 	stb	r6, VCPU_VPA_DIRTY(r4)
 25:
 
-BEGIN_FTR_SECTION
 	/* Save purr/spurr */
 	mfspr	r5,SPRN_PURR
 	mfspr	r6,SPRN_SPURR
@@ -616,7 +511,6 @@ BEGIN_FTR_SECTION
 	ld	r8,VCPU_SPURR(r4)
 	mtspr	SPRN_PURR,r7
 	mtspr	SPRN_SPURR,r8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
@@ -625,9 +519,7 @@ BEGIN_FTR_SECTION
 	ld	r6,VCPU_DABR(r4)
 	mtspr	SPRN_DABRX,r5
 	mtspr	SPRN_DABR,r6
-BEGIN_FTR_SECTION_NESTED(89)
 	isync
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -758,20 +650,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
758 lwz r7, VCPU_PMC + 12(r4) 650 lwz r7, VCPU_PMC + 12(r4)
759 lwz r8, VCPU_PMC + 16(r4) 651 lwz r8, VCPU_PMC + 16(r4)
760 lwz r9, VCPU_PMC + 20(r4) 652 lwz r9, VCPU_PMC + 20(r4)
761BEGIN_FTR_SECTION
762 lwz r10, VCPU_PMC + 24(r4)
763 lwz r11, VCPU_PMC + 28(r4)
764END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
765 mtspr SPRN_PMC1, r3 653 mtspr SPRN_PMC1, r3
766 mtspr SPRN_PMC2, r5 654 mtspr SPRN_PMC2, r5
767 mtspr SPRN_PMC3, r6 655 mtspr SPRN_PMC3, r6
768 mtspr SPRN_PMC4, r7 656 mtspr SPRN_PMC4, r7
769 mtspr SPRN_PMC5, r8 657 mtspr SPRN_PMC5, r8
770 mtspr SPRN_PMC6, r9 658 mtspr SPRN_PMC6, r9
771BEGIN_FTR_SECTION
772 mtspr SPRN_PMC7, r10
773 mtspr SPRN_PMC8, r11
774END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
775 ld r3, VCPU_MMCR(r4) 659 ld r3, VCPU_MMCR(r4)
776 ld r5, VCPU_MMCR + 8(r4) 660 ld r5, VCPU_MMCR + 8(r4)
777 ld r6, VCPU_MMCR + 16(r4) 661 ld r6, VCPU_MMCR + 16(r4)
@@ -818,14 +702,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
818 ld r30, VCPU_GPR(R30)(r4) 702 ld r30, VCPU_GPR(R30)(r4)
819 ld r31, VCPU_GPR(R31)(r4) 703 ld r31, VCPU_GPR(R31)(r4)
820 704
821BEGIN_FTR_SECTION
822 /* Switch DSCR to guest value */ 705 /* Switch DSCR to guest value */
823 ld r5, VCPU_DSCR(r4) 706 ld r5, VCPU_DSCR(r4)
824 mtspr SPRN_DSCR, r5 707 mtspr SPRN_DSCR, r5
825END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
826 708
827BEGIN_FTR_SECTION 709BEGIN_FTR_SECTION
828 /* Skip next section on POWER7 or PPC970 */ 710 /* Skip next section on POWER7 */
829 b 8f 711 b 8f
830END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 712END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
831 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ 713 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
@@ -901,7 +783,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
901 mtspr SPRN_DAR, r5 783 mtspr SPRN_DAR, r5
902 mtspr SPRN_DSISR, r6 784 mtspr SPRN_DSISR, r6
903 785
904BEGIN_FTR_SECTION
905 /* Restore AMR and UAMOR, set AMOR to all 1s */ 786 /* Restore AMR and UAMOR, set AMOR to all 1s */
906 ld r5,VCPU_AMR(r4) 787 ld r5,VCPU_AMR(r4)
907 ld r6,VCPU_UAMOR(r4) 788 ld r6,VCPU_UAMOR(r4)
@@ -909,7 +790,6 @@ BEGIN_FTR_SECTION
909 mtspr SPRN_AMR,r5 790 mtspr SPRN_AMR,r5
910 mtspr SPRN_UAMOR,r6 791 mtspr SPRN_UAMOR,r6
911 mtspr SPRN_AMOR,r7 792 mtspr SPRN_AMOR,r7
912END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
913 793
914 /* Restore state of CTRL run bit; assume 1 on entry */ 794 /* Restore state of CTRL run bit; assume 1 on entry */
915 lwz r5,VCPU_CTRL(r4) 795 lwz r5,VCPU_CTRL(r4)
@@ -944,13 +824,11 @@ deliver_guest_interrupt:
944 rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63 824 rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
945 cmpdi cr1, r0, 0 825 cmpdi cr1, r0, 0
946 andi. r8, r11, MSR_EE 826 andi. r8, r11, MSR_EE
947BEGIN_FTR_SECTION
948 mfspr r8, SPRN_LPCR 827 mfspr r8, SPRN_LPCR
949 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */ 828 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
950 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH 829 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
951 mtspr SPRN_LPCR, r8 830 mtspr SPRN_LPCR, r8
952 isync 831 isync
953END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
954 beq 5f 832 beq 5f
955 li r0, BOOK3S_INTERRUPT_EXTERNAL 833 li r0, BOOK3S_INTERRUPT_EXTERNAL
956 bne cr1, 12f 834 bne cr1, 12f
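
[Annotation: the rldimi retained above deposits the level-interrupt-pending flag (bit 0 of r0) into the MER (Mediated External Request) bit of the LPCR, which lets the hypervisor signal a pending external interrupt that the guest takes once it enables MSR_EE. A C restatement of that bit insert, assuming the kernel's LPCR_MER_SH value of 11 and with an illustrative helper name:]

    static unsigned long lpcr_set_mer(unsigned long lpcr, unsigned long pending)
    {
            const int LPCR_MER_SH = 11;     /* assumed from kernel headers */

            lpcr &= ~(1UL << LPCR_MER_SH);          /* clear the MER bit  */
            lpcr |= (pending & 1) << LPCR_MER_SH;   /* insert bit 0 of r0 */
            return lpcr;
    }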
@@ -1108,11 +986,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1108 /* Save HEIR (HV emulation assist reg) in last_inst 986 /* Save HEIR (HV emulation assist reg) in last_inst
1109 if this is an HEI (HV emulation interrupt, e40) */ 987 if this is an HEI (HV emulation interrupt, e40) */
1110 li r3,KVM_INST_FETCH_FAILED 988 li r3,KVM_INST_FETCH_FAILED
1111BEGIN_FTR_SECTION
1112 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 989 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1113 bne 11f 990 bne 11f
1114 mfspr r3,SPRN_HEIR 991 mfspr r3,SPRN_HEIR
1115END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
111611: stw r3,VCPU_LAST_INST(r9) 99211: stw r3,VCPU_LAST_INST(r9)
1117 993
1118 /* these are volatile across C function calls */ 994 /* these are volatile across C function calls */
@@ -1121,13 +997,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1121 std r3, VCPU_CTR(r9) 997 std r3, VCPU_CTR(r9)
1122 stw r4, VCPU_XER(r9) 998 stw r4, VCPU_XER(r9)
1123 999
1124BEGIN_FTR_SECTION
1125 /* If this is a page table miss then see if it's theirs or ours */ 1000 /* If this is a page table miss then see if it's theirs or ours */
1126 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1001 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1127 beq kvmppc_hdsi 1002 beq kvmppc_hdsi
1128 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1003 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1129 beq kvmppc_hisi 1004 beq kvmppc_hisi
1130END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1131 1005
1132 /* See if this is a leftover HDEC interrupt */ 1006 /* See if this is a leftover HDEC interrupt */
1133 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1007 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
@@ -1140,11 +1014,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1140 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL 1014 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1141 beq hcall_try_real_mode 1015 beq hcall_try_real_mode
1142 1016
1143 /* Only handle external interrupts here on arch 206 and later */
1144BEGIN_FTR_SECTION
1145 b ext_interrupt_to_host
1146END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1147
1148 /* External interrupt ? */ 1017 /* External interrupt ? */
1149 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 1018 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1150 bne+ ext_interrupt_to_host 1019 bne+ ext_interrupt_to_host
@@ -1174,11 +1043,9 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1174 mfdsisr r7 1043 mfdsisr r7
1175 std r6, VCPU_DAR(r9) 1044 std r6, VCPU_DAR(r9)
1176 stw r7, VCPU_DSISR(r9) 1045 stw r7, VCPU_DSISR(r9)
1177BEGIN_FTR_SECTION
1178 /* don't overwrite fault_dar/fault_dsisr if HDSI */ 1046 /* don't overwrite fault_dar/fault_dsisr if HDSI */
1179 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE 1047 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1180 beq 6f 1048 beq 6f
1181END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1182 std r6, VCPU_FAULT_DAR(r9) 1049 std r6, VCPU_FAULT_DAR(r9)
1183 stw r7, VCPU_FAULT_DSISR(r9) 1050 stw r7, VCPU_FAULT_DSISR(r9)
1184 1051
@@ -1217,7 +1084,6 @@ mc_cont:
1217 /* 1084 /*
1218 * Save the guest PURR/SPURR 1085 * Save the guest PURR/SPURR
1219 */ 1086 */
1220BEGIN_FTR_SECTION
1221 mfspr r5,SPRN_PURR 1087 mfspr r5,SPRN_PURR
1222 mfspr r6,SPRN_SPURR 1088 mfspr r6,SPRN_SPURR
1223 ld r7,VCPU_PURR(r9) 1089 ld r7,VCPU_PURR(r9)
@@ -1237,7 +1103,6 @@ BEGIN_FTR_SECTION
1237 add r4,r4,r6 1103 add r4,r4,r6
1238 mtspr SPRN_PURR,r3 1104 mtspr SPRN_PURR,r3
1239 mtspr SPRN_SPURR,r4 1105 mtspr SPRN_SPURR,r4
1240END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
1241 1106
1242 /* Save DEC */ 1107 /* Save DEC */
1243 mfspr r5,SPRN_DEC 1108 mfspr r5,SPRN_DEC
@@ -1287,22 +1152,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
12878: 11528:
1288 1153
1289 /* Save and reset AMR and UAMOR before turning on the MMU */ 1154 /* Save and reset AMR and UAMOR before turning on the MMU */
1290BEGIN_FTR_SECTION
1291 mfspr r5,SPRN_AMR 1155 mfspr r5,SPRN_AMR
1292 mfspr r6,SPRN_UAMOR 1156 mfspr r6,SPRN_UAMOR
1293 std r5,VCPU_AMR(r9) 1157 std r5,VCPU_AMR(r9)
1294 std r6,VCPU_UAMOR(r9) 1158 std r6,VCPU_UAMOR(r9)
1295 li r6,0 1159 li r6,0
1296 mtspr SPRN_AMR,r6 1160 mtspr SPRN_AMR,r6
1297END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1298 1161
1299 /* Switch DSCR back to host value */ 1162 /* Switch DSCR back to host value */
1300BEGIN_FTR_SECTION
1301 mfspr r8, SPRN_DSCR 1163 mfspr r8, SPRN_DSCR
1302 ld r7, HSTATE_DSCR(r13) 1164 ld r7, HSTATE_DSCR(r13)
1303 std r8, VCPU_DSCR(r9) 1165 std r8, VCPU_DSCR(r9)
1304 mtspr SPRN_DSCR, r7 1166 mtspr SPRN_DSCR, r7
1305END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1306 1167
1307 /* Save non-volatile GPRs */ 1168 /* Save non-volatile GPRs */
1308 std r14, VCPU_GPR(R14)(r9) 1169 std r14, VCPU_GPR(R14)(r9)
@@ -1484,11 +1345,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1484 mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 1345 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1485 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 1346 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1486 mfspr r6, SPRN_MMCRA 1347 mfspr r6, SPRN_MMCRA
1487BEGIN_FTR_SECTION 1348 /* Clear MMCRA in order to disable SDAR updates */
1488 /* On P7, clear MMCRA in order to disable SDAR updates */
1489 li r7, 0 1349 li r7, 0
1490 mtspr SPRN_MMCRA, r7 1350 mtspr SPRN_MMCRA, r7
1491END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1492 isync 1351 isync
1493 beq 21f /* if no VPA, save PMU stuff anyway */ 1352 beq 21f /* if no VPA, save PMU stuff anyway */
1494 lbz r7, LPPACA_PMCINUSE(r8) 1353 lbz r7, LPPACA_PMCINUSE(r8)
@@ -1513,10 +1372,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1513 mfspr r6, SPRN_PMC4 1372 mfspr r6, SPRN_PMC4
1514 mfspr r7, SPRN_PMC5 1373 mfspr r7, SPRN_PMC5
1515 mfspr r8, SPRN_PMC6 1374 mfspr r8, SPRN_PMC6
1516BEGIN_FTR_SECTION
1517 mfspr r10, SPRN_PMC7
1518 mfspr r11, SPRN_PMC8
1519END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1520 stw r3, VCPU_PMC(r9) 1375 stw r3, VCPU_PMC(r9)
1521 stw r4, VCPU_PMC + 4(r9) 1376 stw r4, VCPU_PMC + 4(r9)
1522 stw r5, VCPU_PMC + 8(r9) 1377 stw r5, VCPU_PMC + 8(r9)
@@ -1524,10 +1379,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1524 stw r7, VCPU_PMC + 16(r9) 1379 stw r7, VCPU_PMC + 16(r9)
1525 stw r8, VCPU_PMC + 20(r9) 1380 stw r8, VCPU_PMC + 20(r9)
1526BEGIN_FTR_SECTION 1381BEGIN_FTR_SECTION
1527 stw r10, VCPU_PMC + 24(r9)
1528 stw r11, VCPU_PMC + 28(r9)
1529END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1530BEGIN_FTR_SECTION
1531 mfspr r5, SPRN_SIER 1382 mfspr r5, SPRN_SIER
1532 mfspr r6, SPRN_SPMC1 1383 mfspr r6, SPRN_SPMC1
1533 mfspr r7, SPRN_SPMC2 1384 mfspr r7, SPRN_SPMC2
@@ -1547,11 +1398,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1547 ptesync 1398 ptesync
1548 1399
1549hdec_soon: /* r12 = trap, r13 = paca */ 1400hdec_soon: /* r12 = trap, r13 = paca */
1550BEGIN_FTR_SECTION
1551 b 32f
1552END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1553 /* 1401 /*
1554 * POWER7 guest -> host partition switch code. 1402 * POWER7/POWER8 guest -> host partition switch code.
1555 * We don't have to lock against tlbies but we do 1403 * We don't have to lock against tlbies but we do
1556 * have to coordinate the hardware threads. 1404 * have to coordinate the hardware threads.
1557 */ 1405 */
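
[Annotation: hdec_soon is the bail-out taken when the guard near guest entry ("cmpwi r3,512 /* 1 microsecond */" above) finds under a microsecond of hypervisor-decrementer time remaining; with the PPC970 detour gone it now falls straight into the POWER7/POWER8 exit path. Restated trivially in C, helper name illustrative:]

    /*
     * HDEC counts down at the 512 MHz timebase, so 512 ticks is one
     * microsecond; the compare is signed because an expired HDEC
     * goes negative.
     */
    static int hdec_expires_soon(long hdec_ticks)
    {
            return hdec_ticks < 512;    /* < ~1 us: bail out via hdec_soon */
    }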
@@ -1679,87 +1527,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
167916: ld r8,KVM_HOST_LPCR(r4) 152716: ld r8,KVM_HOST_LPCR(r4)
1680 mtspr SPRN_LPCR,r8 1528 mtspr SPRN_LPCR,r8
1681 isync 1529 isync
1682 b 33f
1683
1684 /*
1685 * PPC970 guest -> host partition switch code.
1686 * We have to lock against concurrent tlbies, and
1687 * we have to flush the whole TLB.
1688 */
168932: ld r5,HSTATE_KVM_VCORE(r13)
1690 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1691
1692 /* Take the guest's tlbie_lock */
1693#ifdef __BIG_ENDIAN__
1694 lwz r8,PACA_LOCK_TOKEN(r13)
1695#else
1696 lwz r8,PACAPACAINDEX(r13)
1697#endif
1698 addi r3,r4,KVM_TLBIE_LOCK
169924: lwarx r0,0,r3
1700 cmpwi r0,0
1701 bne 24b
1702 stwcx. r8,0,r3
1703 bne 24b
1704 isync
1705
1706 ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
1707 li r0,0x18f
1708 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
1709 or r0,r7,r0
1710 ptesync
1711 sync
1712 mtspr SPRN_HID4,r0 /* switch to reserved LPID */
1713 isync
1714 li r0,0
1715 stw r0,0(r3) /* drop guest tlbie_lock */
1716
1717 /* invalidate the whole TLB */
1718 li r0,256
1719 mtctr r0
1720 li r6,0
172125: tlbiel r6
1722 addi r6,r6,0x1000
1723 bdnz 25b
1724 ptesync
1725
1726 /* take native_tlbie_lock */
1727 ld r3,toc_tlbie_lock@toc(2)
172824: lwarx r0,0,r3
1729 cmpwi r0,0
1730 bne 24b
1731 stwcx. r8,0,r3
1732 bne 24b
1733 isync
1734
1735 ld r6,KVM_HOST_SDR1(r4)
1736 mtspr SPRN_SDR1,r6 /* switch to host page table */
1737
1738 /* Set up host HID4 value */
1739 sync
1740 mtspr SPRN_HID4,r7
1741 isync
1742 li r0,0
1743 stw r0,0(r3) /* drop native_tlbie_lock */
1744
1745 lis r8,0x7fff /* MAX_INT@h */
1746 mtspr SPRN_HDEC,r8
1747
1748 /* Disable HDEC interrupts */
1749 mfspr r0,SPRN_HID0
1750 li r3,0
1751 rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
1752 sync
1753 mtspr SPRN_HID0,r0
1754 mfspr r0,SPRN_HID0
1755 mfspr r0,SPRN_HID0
1756 mfspr r0,SPRN_HID0
1757 mfspr r0,SPRN_HID0
1758 mfspr r0,SPRN_HID0
1759 mfspr r0,SPRN_HID0
1760 1530
1761 /* load host SLB entries */ 1531 /* load host SLB entries */
176233: ld r8,PACA_SLBSHADOWPTR(r13) 1532 ld r8,PACA_SLBSHADOWPTR(r13)
1763 1533
1764 .rept SLB_NUM_BOLTED 1534 .rept SLB_NUM_BOLTED
1765 li r3, SLBSHADOW_SAVEAREA 1535 li r3, SLBSHADOW_SAVEAREA
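
[Annotation: the block deleted in the hunk above is the mirror-image PPC970 exit path — retake the tlbie locks, restore the host SDR1 and HID4, flush all 256 TLB congruence classes, then wind HDEC out to the maximum and disable HDEC interrupts in HID0. A sketch of the flush loop as C with inline assembly (ppc64-only, illustrative):]

    /*
     * One tlbiel per congruence class, 256 classes selected by
     * effective-address bits stepping by 0x1000, followed by ptesync -
     * needed on PPC970 because its TLB entries carry no LPID tag.
     */
    static inline void tlb_flush_all_970(void)
    {
            unsigned long set;

            for (set = 0; set < 256; set++)
                    asm volatile("tlbiel %0" : : "r" (set << 12) : "memory");
            asm volatile("ptesync" : : : "memory");
    }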
@@ -2107,9 +1877,6 @@ _GLOBAL(kvmppc_h_cede)
2107 stw r0,VCPU_TRAP(r3) 1877 stw r0,VCPU_TRAP(r3)
2108 li r0,H_SUCCESS 1878 li r0,H_SUCCESS
2109 std r0,VCPU_GPR(R3)(r3) 1879 std r0,VCPU_GPR(R3)(r3)
2110BEGIN_FTR_SECTION
2111 b kvm_cede_exit /* just send it up to host on 970 */
2112END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
2113 1880
2114 /* 1881 /*
2115 * Set our bit in the bitmask of napping threads unless all the 1882 * Set our bit in the bitmask of napping threads unless all the
@@ -2435,7 +2202,6 @@ BEGIN_FTR_SECTION
2435END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2202END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2436#endif 2203#endif
2437 mtmsrd r8 2204 mtmsrd r8
2438 isync
2439 addi r3,r3,VCPU_FPRS 2205 addi r3,r3,VCPU_FPRS
2440 bl store_fp_state 2206 bl store_fp_state
2441#ifdef CONFIG_ALTIVEC 2207#ifdef CONFIG_ALTIVEC
@@ -2471,7 +2237,6 @@ BEGIN_FTR_SECTION
2471END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2237END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2472#endif 2238#endif
2473 mtmsrd r8 2239 mtmsrd r8
2474 isync
2475 addi r3,r4,VCPU_FPRS 2240 addi r3,r4,VCPU_FPRS
2476 bl load_fp_state 2241 bl load_fp_state
2477#ifdef CONFIG_ALTIVEC 2242#ifdef CONFIG_ALTIVEC
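
[Annotation: the closing hunks below move to powerpc.c — with no PPC970 to support, KVM_CAP_PPC_RMA now always reports 0 (an RMA is never required, where a PPC970 host used to report 2) and the KVM_CAP_SYNC_MMU answer collapses to hv_enabled. Userspace observes this through the standard KVM_CHECK_EXTENSION ioctl; a minimal probe:]

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            if (kvm < 0)
                    return 1;

            /* After this patch: always 0 on Book3S HV hosts. */
            int rma = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_RMA);
            printf("KVM_CAP_PPC_RMA = %d\n", rma);

            close(kvm);
            return 0;
    }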
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index c1f8f53cd312..c45eaab752b0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -527,18 +527,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
527 r = 0; 527 r = 0;
528 break; 528 break;
529 case KVM_CAP_PPC_RMA: 529 case KVM_CAP_PPC_RMA:
530 r = hv_enabled; 530 r = 0;
531 /* PPC970 requires an RMA */
532 if (r && cpu_has_feature(CPU_FTR_ARCH_201))
533 r = 2;
534 break; 531 break;
535#endif 532#endif
536 case KVM_CAP_SYNC_MMU: 533 case KVM_CAP_SYNC_MMU:
537#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 534#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
538 if (hv_enabled) 535 r = hv_enabled;
539 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
540 else
541 r = 0;
542#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) 536#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
543 r = 1; 537 r = 1;
544#else 538#else