 arch/powerpc/include/asm/kvm_book3s.h |   9 +
 arch/powerpc/include/asm/kvm_host.h   |  17 +-
 arch/powerpc/kvm/Makefile             |   2 +
 arch/powerpc/kvm/book3s.c             |  14 +-
 arch/powerpc/kvm/book3s_32_mmu_host.c | 104 +-----
 arch/powerpc/kvm/book3s_64_mmu_host.c |  98 +----
 6 files changed, 54 insertions(+), 190 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4e995593e479..8274a2d43925 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -115,6 +115,15 @@ extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
+
+extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern int kvmppc_mmu_hpte_sysinit(void);
+extern void kvmppc_mmu_hpte_sysexit(void);
+
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
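These declarations are the new shared-MMU API. Note the signature change: kvmppc_mmu_hpte_cache_next() now returns a struct hpte_cache * instead of an index into the old fixed per-vCPU array. The implementation lives in the new book3s_mmu_hpte.c, which this diff does not include; a plausible sketch of the allocator, assuming a module-level slab cache named hpte_cache_slab (both the name and the GFP_KERNEL choice are assumptions; see the sysinit sketch at the book3s.c hunk below):

struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	/* Evict everything once the cache is full, as the old code did */
	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	/* Entries are now allocated on demand instead of taken from an array */
	return kmem_cache_zalloc(hpte_cache_slab, GFP_KERNEL);
}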
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0c9ad869decd..e004eafcd3f0 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -38,7 +38,13 @@
 #define KVM_NR_PAGE_SIZES	1
 #define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
 
-#define HPTEG_CACHE_NUM 1024
+#define HPTEG_CACHE_NUM			(1 << 15)
+#define HPTEG_HASH_BITS_PTE		13
+#define HPTEG_HASH_BITS_VPTE		13
+#define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
+#define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
+#define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
 
 struct kvm;
 struct kvm_run;
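HPTEG_CACHE_NUM grows from 1024 to 32768 entries, and the *_HASH_BITS_* constants size three hash tables keyed by effective address, by virtual page, and by a coarser virtual-page range. The hashing itself happens in the unshown book3s_mmu_hpte.c; a minimal sketch of what such helpers could look like, assuming hash_64() from linux/hash.h (the exact shift amounts are assumptions):

#include <linux/hash.h>

/* One bucket per guest effective page address (sketch) */
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PAGE_SHIFT, HPTEG_HASH_BITS_PTE);
}

/* One bucket per guest virtual page number (sketch) */
static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage, HPTEG_HASH_BITS_VPTE);
}

/* Coarser bucketing over large vpage ranges, for ranged flushes (sketch) */
static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64(vpage >> 12, HPTEG_HASH_BITS_VPTE_LONG);
}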
@@ -151,6 +157,9 @@ struct kvmppc_mmu {
 };
 
 struct hpte_cache {
+	struct hlist_node list_pte;
+	struct hlist_node list_vpte;
+	struct hlist_node list_vpte_long;
 	u64 host_va;
 	u64 pfn;
 	ulong slot;
@@ -282,8 +291,10 @@ struct kvm_vcpu_arch {
 	unsigned long pending_exceptions;
 
 #ifdef CONFIG_PPC_BOOK3S
-	struct hpte_cache hpte_cache[HPTEG_CACHE_NUM];
-	int hpte_cache_offset;
+	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+	int hpte_cache_count;
 #endif
 };
 
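Each cached shadow PTE now carries three hlist_node links (see the hpte_cache hunk above), so a single entry sits on all three per-vCPU chains declared here, and hpte_cache_count replaces the old hpte_cache_offset cursor. A minimal sketch of what kvmppc_mmu_hpte_cache_map() in the unshown book3s_mmu_hpte.c has to do, reusing the hash helpers sketched earlier:

void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;

	/* Chain keyed by guest effective address */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

	/* Chain keyed by guest virtual page */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

	/* Coarse chain used for ranged virtual-address flushes */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head(&pte->list_vpte_long,
		       &vcpu->arch.hpte_hash_vpte_long[index]);

	vcpu->arch.hpte_cache_count++;
}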
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index ff436066bf77..d45c818a384c 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -45,6 +45,7 @@ kvm-book3s_64-objs := \
 	book3s.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
+	book3s_mmu_hpte.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
@@ -57,6 +58,7 @@ kvm-book3s_32-objs := \
 	book3s.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
+	book3s_mmu_hpte.o \
 	book3s_32_mmu_host.o \
 	book3s_32_mmu.o
 kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 801d9f3c70ae..a3cef30d1d42 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1384,12 +1384,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 static int kvmppc_book3s_init(void)
 {
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
-			THIS_MODULE);
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+		     THIS_MODULE);
+
+	if (r)
+		return r;
+
+	r = kvmppc_mmu_hpte_sysinit();
+
+	return r;
 }
 
 static void kvmppc_book3s_exit(void)
 {
+	kvmppc_mmu_hpte_sysexit();
 	kvm_exit();
 }
 
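kvmppc_mmu_hpte_sysinit()/kvmppc_mmu_hpte_sysexit() are module-lifetime hooks. Since entries are no longer embedded in kvm_vcpu_arch, something must own their backing storage; a slab cache is the natural fit. A hedged sketch, with the cache name and error handling invented for illustration:

static struct kmem_cache *hpte_cache_slab;	/* assumed name */

int kvmppc_mmu_hpte_sysinit(void)
{
	/* Backing store for on-demand hpte_cache allocations */
	hpte_cache_slab = kmem_cache_create("kvm-hpte-cache",
					    sizeof(struct hpte_cache),
					    0, 0, NULL);

	return hpte_cache_slab ? 0 : -ENOMEM;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache_slab);
}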
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 904f5ac78f54..0b51ef872c1e 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -58,105 +58,19 @@
 static ulong htab;
 static u32 htabmask;
 
-static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	volatile u32 *pteg;
 
-	dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
-		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
-
+	/* Remove from host HTAB */
 	pteg = (u32*)pte->slot;
-
 	pteg[0] = 0;
+
+	/* And make sure it's gone from the TLB too */
 	asm volatile ("sync");
 	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
 	asm volatile ("sync");
 	asm volatile ("tlbsync");
-
-	pte->host_va = 0;
-
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-}
-
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
-		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_ea &= ea_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.eaddr & ea_mask) == guest_ea) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-
-	/* Doing a complete flush -> start from scratch */
-	if (!ea_mask)
-		vcpu->arch.hpte_cache_offset = 0;
-}
-
-void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_vp &= vp_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.vpage & vp_mask) == guest_vp) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-}
-
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.raddr >= pa_start) &&
-		    (pte->pte.raddr < pa_end)) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-}
-
-static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
-		kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
-	return vcpu->arch.hpte_cache_offset++;
 }
 
 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
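All of the removed code was a linear scan over up to HPTEG_CACHE_NUM entries, duplicated almost line for line in the 64-bit file below. Only the HTAB/TLB teardown is architecture-specific, which is exactly what survives here as the exported kvmppc_mmu_invalidate_pte(). The generic side in book3s_mmu_hpte.c presumably recombines it with the release-and-unlink steps removed above, roughly like this sketch (reconstructed from the removed lines plus the new hash chains):

static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	/* Architecture-specific: clear the host HTAB entry and the TLB */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	/* Generic: give the page back, dirty if the guest could write it */
	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	/* Unlink from all three hash chains */
	hlist_del(&pte->list_pte);
	hlist_del(&pte->list_vpte);
	hlist_del(&pte->list_vpte_long);

	vcpu->arch.hpte_cache_count--;
}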
@@ -230,7 +144,6 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	register int rr = 0;
 	bool primary = false;
 	bool evict = false;
-	int hpte_id;
 	struct hpte_cache *pte;
 
 	/* Get host physical address for gpa */
@@ -315,8 +228,7 @@ next_pteg:
 
 	/* Now tell our Shadow PTE code about the new page */
 
-	hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
-	pte = &vcpu->arch.hpte_cache[hpte_id];
+	pte = kvmppc_mmu_hpte_cache_next(vcpu);
 
 	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
 		    orig_pte->may_write ? 'w' : '-',
@@ -329,6 +241,8 @@ next_pteg:
 	pte->pte = *orig_pte;
 	pte->pfn = hpaddr >> PAGE_SHIFT;
 
+	kvmppc_mmu_hpte_cache_map(vcpu, pte);
+
 	return 0;
 }
 
@@ -413,7 +327,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+	kvmppc_mmu_hpte_destroy(vcpu);
 	preempt_disable();
 	__destroy_context(to_book3s(vcpu)->context_id);
 	preempt_enable();
@@ -453,5 +367,7 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
 	htab = (ulong)__va(sdr1 & 0xffff0000);
 
+	kvmppc_mmu_hpte_init(vcpu);
+
 	return 0;
 }
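On the init side, kvmppc_mmu_hpte_init() only needs to put the per-vCPU hash tables into a known-empty state. A sketch of the obvious implementation:

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < HPTEG_HASH_NUM_PTE; i++)
		INIT_HLIST_HEAD(&vcpu->arch.hpte_hash_pte[i]);
	for (i = 0; i < HPTEG_HASH_NUM_VPTE; i++)
		INIT_HLIST_HEAD(&vcpu->arch.hpte_hash_vpte[i]);
	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++)
		INIT_HLIST_HEAD(&vcpu->arch.hpte_hash_vpte_long[i]);

	vcpu->arch.hpte_cache_count = 0;

	return 0;
}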
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 4ccdde152c37..384179a5002b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -47,98 +47,11 @@
 #define dprintk_slb(a, ...) do { } while(0)
 #endif
 
-static void invalidate_pte(struct hpte_cache *pte)
+void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
-	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
-		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
-
 	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
 			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
 			       false);
-	pte->host_va = 0;
-
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-}
-
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
-		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_ea &= ea_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.eaddr & ea_mask) == guest_ea) {
-			invalidate_pte(pte);
-		}
-	}
-
-	/* Doing a complete flush -> start from scratch */
-	if (!ea_mask)
-		vcpu->arch.hpte_cache_offset = 0;
-}
-
-void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_vp &= vp_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.vpage & vp_mask) == guest_vp) {
-			invalidate_pte(pte);
-		}
-	}
-}
-
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n",
-		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.raddr >= pa_start) &&
-		    (pte->pte.raddr < pa_end)) {
-			invalidate_pte(pte);
-		}
-	}
-}
-
-static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
-		kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
-	return vcpu->arch.hpte_cache_offset++;
 }
 
 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
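The payoff shows up in the flush paths: instead of scanning every cached entry, flushing one effective address now touches a single hash bucket. A sketch of the page-granular case, building on the invalidate_pte() wrapper sketched after the 32-bit hunk; the four-argument hlist_for_each_entry_safe() form used here is the modern one (the variant contemporary with this patch took an extra node pointer), and the safe iterator matters because invalidation unlinks entries mid-walk:

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *tmp;
	struct hpte_cache *pte;

	/* Only one bucket can contain mappings of this effective address */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	hlist_for_each_entry_safe(pte, tmp, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);
}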
@@ -246,8 +159,7 @@ map_again:
 		attempt++;
 		goto map_again;
 	} else {
-		int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
-		struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
+		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
 
 		dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
 			    ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
@@ -265,6 +177,8 @@ map_again:
 		pte->host_va = va;
 		pte->pte = *orig_pte;
 		pte->pfn = hpaddr >> PAGE_SHIFT;
+
+		kvmppc_mmu_hpte_cache_map(vcpu, pte);
 	}
 
 	return 0;
@@ -391,7 +305,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+	kvmppc_mmu_hpte_destroy(vcpu);
 	__destroy_context(to_book3s(vcpu)->context_id);
 }
 
@@ -409,5 +323,7 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
 	vcpu3s->vsid_next = vcpu3s->vsid_first;
 
+	kvmppc_mmu_hpte_init(vcpu);
+
 	return 0;
 }