-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h    |   3
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s_64.h |  69
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_radix.c   |  44
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c             |   1
-rw-r--r--	arch/powerpc/kvm/book3s_hv_nested.c      | 138
5 files changed, 240 insertions, 15 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 63f7ccfac174..d7aeb6f701a6 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -196,6 +196,9 @@ extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
 			int table_index, u64 *pte_ret_p);
 extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 			struct kvmppc_pte *gpte, bool data, bool iswrite);
+extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
+			unsigned int shift, struct kvm_memory_slot *memslot,
+			unsigned int lpid);
 extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
 			bool writing, unsigned long gpa,
 			unsigned int lpid);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 5496152f70e1..c2a9146ee016 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -53,6 +53,66 @@ struct kvm_nested_guest {
 	struct kvm_nested_guest *next;
 };
 
+/*
+ * We define a nested rmap entry as a single 64-bit quantity
+ * 0xFFF0000000000000	12-bit lpid field
+ * 0x000FFFFFFFFFF000	40-bit guest 4k page frame number
+ * 0x0000000000000001	1-bit  single entry flag
+ */
+#define RMAP_NESTED_LPID_MASK		0xFFF0000000000000UL
+#define RMAP_NESTED_LPID_SHIFT		(52)
+#define RMAP_NESTED_GPA_MASK		0x000FFFFFFFFFF000UL
+#define RMAP_NESTED_IS_SINGLE_ENTRY	0x0000000000000001UL
+
+/* Structure for a nested guest rmap entry */
+struct rmap_nested {
+	struct llist_node list;
+	u64 rmap;
+};
+
+/*
+ * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
+ *			     safe against removal of the list entry or NULL list
+ * @pos:	a (struct rmap_nested *) to use as a loop cursor
+ * @node:	pointer to the first entry
+ *		NOTE: this can be NULL
+ * @rmapp:	an (unsigned long *) in which to return the rmap entries on each
+ *		iteration
+ *		NOTE: this must point to already allocated memory
+ *
+ * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
+ * rmap entry in the memslot. The list is always terminated by a "single entry"
+ * stored in the list element of the final entry of the llist. If there is ONLY
+ * a single entry then this is itself in the rmap entry of the memslot, not a
+ * llist head pointer.
+ *
+ * Note that the iterator below assumes that a nested rmap entry is always
+ * non-zero. This is true for our usage because the LPID field is always
+ * non-zero (zero is reserved for the host).
+ *
+ * This should be used to iterate over the list of rmap_nested entries with
+ * processing done on the u64 rmap value given by each iteration. This is safe
+ * against removal of list entries and it is always safe to call free on (pos).
+ *
+ * e.g.
+ * struct rmap_nested *cursor;
+ * struct llist_node *first;
+ * unsigned long rmap;
+ * for_each_nest_rmap_safe(cursor, first, &rmap) {
+ *	do_something(rmap);
+ *	free(cursor);
+ * }
+ */
+#define for_each_nest_rmap_safe(pos, node, rmapp)			       \
+	for ((pos) = llist_entry((node), typeof(*(pos)), list);	       \
+	     (node) &&							       \
+	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?    \
+			  ((u64) (node)) : ((pos)->rmap))) &&		       \
+	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
+			 ((struct llist_node *) ((pos) = NULL)) :	       \
+			 (pos)->list.next)), true);			       \
+	     (pos) = llist_entry((node), typeof(*(pos)), list))
+
 struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
 					  bool create);
 void kvmhv_put_nested(struct kvm_nested_guest *gp);
@@ -551,7 +611,14 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
 
 extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 			     unsigned long gpa, unsigned int level,
-			     unsigned long mmu_seq, unsigned int lpid);
+			     unsigned long mmu_seq, unsigned int lpid,
+			     unsigned long *rmapp, struct rmap_nested **n_rmap);
+extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
+				   struct rmap_nested **n_rmap);
+extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				unsigned long gpa, unsigned long hpa,
+				unsigned long nbytes);
 
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
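[Annotation] The masks above pack an L1 lpid and a 4k guest page frame number into a single u64, with the low bit reserved to flag a "single entry" rmap. A minimal sketch of how such a value is encoded and decoded, matching the way the patch builds n_rmap->rmap in the fault path and unpacks it in kvmhv_remove_nest_rmap(); the helper names are illustrative, not part of the patch:

/* Illustrative only: pack/unpack a nested rmap value using the masks above. */
static inline u64 n_rmap_encode(unsigned int lpid, unsigned long gpa)
{
	return (((u64)lpid << RMAP_NESTED_LPID_SHIFT) & RMAP_NESTED_LPID_MASK) |
	       (gpa & RMAP_NESTED_GPA_MASK);
}

static inline unsigned int n_rmap_lpid(u64 rmap)
{
	return (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
}

static inline unsigned long n_rmap_gpa(u64 rmap)
{
	return rmap & RMAP_NESTED_GPA_MASK;	/* 4k-aligned guest physical address */
}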
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index c4b1a9e1e3ff..4c1eccb20190 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -256,27 +256,38 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
 	kmem_cache_free(kvm_pmd_cache, pmdp);
 }
 
-void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
-		      unsigned long gpa, unsigned int shift,
-		      struct kvm_memory_slot *memslot,
+/* Called with kvm->mmu_lock held */
+void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
+		      unsigned int shift, struct kvm_memory_slot *memslot,
 		      unsigned int lpid)
 
 {
 	unsigned long old;
+	unsigned long gfn = gpa >> PAGE_SHIFT;
+	unsigned long page_size = PAGE_SIZE;
+	unsigned long hpa;
 
 	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
 	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
-	if ((old & _PAGE_DIRTY) && (lpid == kvm->arch.lpid)) {
-		unsigned long gfn = gpa >> PAGE_SHIFT;
-		unsigned long page_size = PAGE_SIZE;
 
-		if (shift)
-			page_size = 1ul << shift;
+	/* The following only applies to L1 entries */
+	if (lpid != kvm->arch.lpid)
+		return;
+
+	if (!memslot) {
+		memslot = gfn_to_memslot(kvm, gfn);
 		if (!memslot)
-			memslot = gfn_to_memslot(kvm, gfn);
-		if (memslot && memslot->dirty_bitmap)
-			kvmppc_update_dirty_map(memslot, gfn, page_size);
+			return;
 	}
+	if (shift)
+		page_size = 1ul << shift;
+
+	gpa &= ~(page_size - 1);
+	hpa = old & PTE_RPN_MASK;
+	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
+
+	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
+		kvmppc_update_dirty_map(memslot, gfn, page_size);
 }
 
 /*
@@ -430,7 +441,8 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
 
 int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		      unsigned long gpa, unsigned int level,
-		      unsigned long mmu_seq, unsigned int lpid)
+		      unsigned long mmu_seq, unsigned int lpid,
+		      unsigned long *rmapp, struct rmap_nested **n_rmap)
 {
 	pgd_t *pgd;
 	pud_t *pud, *new_pud = NULL;
@@ -509,6 +521,8 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
 		}
 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
+		if (rmapp && n_rmap)
+			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
 		ret = 0;
 		goto out_unlock;
 	}
@@ -559,6 +573,8 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
 		}
 		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
+		if (rmapp && n_rmap)
+			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
 		ret = 0;
 		goto out_unlock;
 	}
@@ -583,6 +599,8 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		goto out_unlock;
 	}
 	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
+	if (rmapp && n_rmap)
+		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
 	ret = 0;
 
 out_unlock:
@@ -710,7 +728,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 
 	/* Allocate space in the tree and write the PTE */
 	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
-				mmu_seq, kvm->arch.lpid);
+				mmu_seq, kvm->arch.lpid, NULL, NULL);
 	if (inserted_pte)
 		*inserted_pte = pte;
 	if (levelp)
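[Annotation] The ownership contract implied by the new rmapp/n_rmap arguments: the caller allocates the rmap_nested entry, kvmppc_create_pte() records it via kvmhv_insert_nest_rmap() only when both arguments are non-NULL, and kvmhv_insert_nest_rmap() clears *n_rmap when it consumes the entry, so the caller frees the entry only if it was not linked in. A hedged sketch of that calling pattern, condensing the nested fault path added later in this patch; the wrapper name shadow_map_one is hypothetical:

/* Illustrative only: allocate an rmap entry and let kvmppc_create_pte() consume it. */
static int shadow_map_one(struct kvm *kvm, struct kvm_nested_guest *gp,
			  struct kvm_memory_slot *memslot, pte_t pte,
			  unsigned long n_gpa, unsigned long gfn,
			  unsigned int level, unsigned long mmu_seq)
{
	struct rmap_nested *n_rmap;
	unsigned long *rmapp;
	int ret;

	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
	if (!n_rmap)
		return -ENOMEM;
	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
		       ((unsigned long)gp->l1_lpid << RMAP_NESTED_LPID_SHIFT);
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	kfree(n_rmap);	/* NULL if the entry was inserted; kfree(NULL) is a no-op */
	return ret;
}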
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index a87912508f63..6e69b4de9a9a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4486,6 +4486,7 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 	kvmppc_free_hpt(&kvm->arch.hpt);
 	kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
 			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+	kvmppc_rmap_reset(kvm);
 	kvm->arch.radix = 1;
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 21a210c134af..3fa676b2acd9 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/kvm_host.h>
+#include <linux/llist.h>
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -22,6 +23,7 @@
 static struct patb_entry *pseries_partition_tb;
 
 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
+static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);
 
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
 {
@@ -456,6 +458,8 @@ void kvmhv_release_all_nested(struct kvm *kvm)
 	int i;
 	struct kvm_nested_guest *gp;
 	struct kvm_nested_guest *freelist = NULL;
+	struct kvm_memory_slot *memslot;
+	int srcu_idx;
 
 	spin_lock(&kvm->mmu_lock);
 	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
@@ -474,6 +478,11 @@ void kvmhv_release_all_nested(struct kvm *kvm)
 		freelist = gp->next;
 		kvmhv_release_nested(gp);
 	}
+
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
+		kvmhv_free_memslot_nest_rmap(memslot);
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
 }
 
 /* caller must hold gp->tlb_lock */
@@ -544,6 +553,123 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp)
 		kvmhv_release_nested(gp);
 }
 
+static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
+{
+	if (lpid > kvm->arch.max_nested_lpid)
+		return NULL;
+	return kvm->arch.nested_guests[lpid];
+}
+
+static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
+{
+	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
+				      RMAP_NESTED_GPA_MASK));
+}
+
+void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
+			    struct rmap_nested **n_rmap)
+{
+	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
+	struct rmap_nested *cursor;
+	u64 rmap, new_rmap = (*n_rmap)->rmap;
+
+	/* Are there any existing entries? */
+	if (!(*rmapp)) {
+		/* No -> use the rmap as a single entry */
+		*rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
+		return;
+	}
+
+	/* Do any entries match what we're trying to insert? */
+	for_each_nest_rmap_safe(cursor, entry, &rmap) {
+		if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
+			return;
+	}
+
+	/* Do we need to create a list or just add the new entry? */
+	rmap = *rmapp;
+	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
+		*rmapp = 0UL;
+	llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
+	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
+		(*n_rmap)->list.next = (struct llist_node *) rmap;
+
+	/* Set NULL so not freed by caller */
+	*n_rmap = NULL;
+}
+
+static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
+				   unsigned long hpa, unsigned long mask)
+{
+	struct kvm_nested_guest *gp;
+	unsigned long gpa;
+	unsigned int shift, lpid;
+	pte_t *ptep;
+
+	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
+	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
+	gp = kvmhv_find_nested(kvm, lpid);
+	if (!gp)
+		return;
+
+	/* Find and invalidate the pte */
+	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+	/* Don't spuriously invalidate ptes if the pfn has changed */
+	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
+		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
+}
+
+static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
+					unsigned long hpa, unsigned long mask)
+{
+	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
+	struct rmap_nested *cursor;
+	unsigned long rmap;
+
+	for_each_nest_rmap_safe(cursor, entry, &rmap) {
+		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
+		kfree(cursor);
+	}
+}
+
+/* called with kvm->mmu_lock held */
+void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+				  struct kvm_memory_slot *memslot,
+				  unsigned long gpa, unsigned long hpa,
+				  unsigned long nbytes)
+{
+	unsigned long gfn, end_gfn;
+	unsigned long addr_mask;
+
+	if (!memslot)
+		return;
+	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
+	end_gfn = gfn + (nbytes >> PAGE_SHIFT);
+
+	addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
+	hpa &= addr_mask;
+
+	for (; gfn < end_gfn; gfn++) {
+		unsigned long *rmap = &memslot->arch.rmap[gfn];
+		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
+	}
+}
+
+static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
+{
+	unsigned long page;
+
+	for (page = 0; page < free->npages; page++) {
+		unsigned long rmap, *rmapp = &free->arch.rmap[page];
+		struct rmap_nested *cursor;
+		struct llist_node *entry;
+
+		entry = llist_del_all((struct llist_head *) rmapp);
+		for_each_nest_rmap_safe(cursor, entry, &rmap)
+			kfree(cursor);
+	}
+}
+
 static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
 					struct kvm_nested_guest *gp,
 					long gpa, int *shift_ret)
@@ -695,11 +821,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_memory_slot *memslot;
+	struct rmap_nested *n_rmap;
 	struct kvmppc_pte gpte;
 	pte_t pte, *pte_p;
 	unsigned long mmu_seq;
 	unsigned long dsisr = vcpu->arch.fault_dsisr;
 	unsigned long ea = vcpu->arch.fault_dar;
+	unsigned long *rmapp;
 	unsigned long n_gpa, gpa, gfn, perm = 0UL;
 	unsigned int shift, l1_shift, level;
 	bool writing = !!(dsisr & DSISR_ISSTORE);
@@ -833,8 +961,16 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 
 	/* 4. Insert the pte into our shadow_pgtable */
 
+	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
+	if (!n_rmap)
+		return RESUME_GUEST; /* Let the guest try again */
+	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
+		(((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
+	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
 	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
-				mmu_seq, gp->shadow_lpid);
+				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
+	if (n_rmap)
+		kfree(n_rmap);
 	if (ret == -EAGAIN)
 		ret = RESUME_GUEST;	/* Let the guest try again */
 
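[Annotation] The storage scheme the new functions rely on: memslot->arch.rmap[gfn] either holds one encoded entry directly (with RMAP_NESTED_IS_SINGLE_ENTRY set in the low bit) or acts as an llist_head pointing to a chain of struct rmap_nested, whose final node's list field again carries an encoded single entry as the terminator. A standalone sketch of how a reader visits every entry in one slot, assuming the definitions from kvm_book3s_64.h above; the helper name walk_nest_rmap is illustrative, not part of the patch:

/* Illustrative only: visit every nested rmap entry recorded in one memslot slot. */
static void walk_nest_rmap(unsigned long *rmapp, void (*fn)(u64 rmap))
{
	struct llist_node *entry = ((struct llist_head *)rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap;

	/*
	 * If *rmapp has RMAP_NESTED_IS_SINGLE_ENTRY set, "entry" is the encoded
	 * value itself and the iterator yields it exactly once; otherwise it is
	 * a real llist whose terminator is the single entry stored in the last
	 * node's list field. An empty slot (*rmapp == 0) yields nothing.
	 */
	for_each_nest_rmap_safe(cursor, entry, &rmap)
		fn(rmap);
}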