author    Alexander Graf <agraf@suse.de>    2010-06-30 09:18:45 -0400
committer Avi Kivity <avi@redhat.com>    2010-08-01 03:47:27 -0400
commit    7741909bf19d9437a6aa3559c0470a640f637dce (patch)
tree      023e6930fb85730c3f39b6186e19e88412ed037e /arch/powerpc/kvm
parent    84754cd8fca66ed476585eabad68cacf42834199 (diff)
KVM: PPC: Add generic hpte management functions
Currently the shadow paging code keeps an array of entries it knows about. Whenever the guest invalidates an entry, we loop through the whole array, trying to invalidate matching parts. While this is a really simple implementation, it is probably the most inefficient one possible.

So instead, let's keep an array of lists around that are indexed by a hash. This way each PTE can be added with three list_add and removed with three list_del invocations, and a lookup only needs to walk the entries that share the same hash; the pattern is sketched below.

This patch implements said lookup and exports generic functions that both the 32-bit and 64-bit backends can use.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
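In userspace terms, the new structure is simply an array of linked lists indexed by a hash of the page address, so an invalidation only ever walks one bucket instead of the whole array. A minimal, self-contained sketch of that pattern follows; HASH_BITS, hash_ea() and struct entry are illustrative stand-ins, not code from this patch (which uses hash_64() and hlist heads):

#include <stdlib.h>

#define HASH_BITS 4                      /* the patch uses HPTEG_HASH_BITS_* */
#define NBUCKETS  (1 << HASH_BITS)

struct entry {
        unsigned long eaddr;             /* guest effective address */
        struct entry *next;
};

static struct entry *buckets[NBUCKETS];

static unsigned long hash_ea(unsigned long eaddr)
{
        return (eaddr >> 12) & (NBUCKETS - 1);  /* stand-in for hash_64() */
}

static void map_entry(struct entry *e)
{
        unsigned long i = hash_ea(e->eaddr);

        e->next = buckets[i];            /* O(1) insert, like hlist_add_head */
        buckets[i] = e;
}

static void flush_page(unsigned long guest_ea)
{
        unsigned long i = hash_ea(guest_ea);
        struct entry **pp = &buckets[i];

        while (*pp) {                    /* walk only the one matching bucket */
                if (((*pp)->eaddr & ~0xfffUL) == guest_ea) {
                        struct entry *dead = *pp;
                        *pp = dead->next;
                        free(dead);
                } else {
                        pp = &(*pp)->next;
                }
        }
}

int main(void)
{
        struct entry *e = calloc(1, sizeof(*e));

        e->eaddr = 0x12345000;
        map_entry(e);
        flush_page(0x12345000);          /* frees the entry we just mapped */
        return 0;
}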
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c | 277
1 file changed, 277 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
new file mode 100644
index 00000000000..4868d4a7ebc
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -0,0 +1,277 @@
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#define PTE_SIZE 12

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

static struct kmem_cache *hpte_cache;

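/*
 * Three hash functions index the lookup lists at different granularities:
 * by guest effective address (PTE_SIZE == 12, i.e. 4k pages), by full
 * virtual page number, and by a coarser virtual page group that backs
 * the bulk flush paths.
 */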
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
        return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
        return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
        return hash_64((vpage & 0xffffff000ULL) >> 12,
                       HPTEG_HASH_BITS_VPTE_LONG);
}

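/*
 * Register a shadow PTE in all three hash lists, so every flush path
 * below can find it again: by effective address, by virtual page, or
 * by virtual page group.
 */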
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        u64 index;

        /* Add to ePTE list */
        index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
        hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

        /* Add to vPTE list */
        index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
        hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

        /* Add to vPTE_long list */
        index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
        hlist_add_head(&pte->list_vpte_long,
                       &vcpu->arch.hpte_hash_vpte_long[index]);
}

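/*
 * Tear down one shadow PTE: drop the backend mapping, release the host
 * page (marked dirty if the guest could write through it), unlink the
 * entry from all three hash lists and return it to the slab cache.
 */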
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
                    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

        /* Different for 32 and 64 bit */
        kvmppc_mmu_invalidate_pte(vcpu, pte);

        if (pte->pte.may_write)
                kvm_release_pfn_dirty(pte->pfn);
        else
                kvm_release_pfn_clean(pte->pfn);

        hlist_del(&pte->list_pte);
        hlist_del(&pte->list_vpte);
        hlist_del(&pte->list_vpte_long);

        vcpu->arch.hpte_cache_count--;
        kmem_cache_free(hpte_cache, pte);
}

static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
        struct hpte_cache *pte;
        struct hlist_node *node, *tmp;
        int i;

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

                hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
                        invalidate_pte(vcpu, pte);
        }
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
        struct hlist_head *list;
        struct hlist_node *node, *tmp;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
        list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_safe(pte, node, tmp, list, list_pte)
                if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
                        invalidate_pte(vcpu, pte);
}

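/*
 * Flush shadow PTEs by guest effective address. The mask selects the
 * granularity: one page, one page offset in each possible 256 MB
 * segment of a 32-bit address space, or a complete flush.
 */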
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
        u64 i;

        dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
                    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);

        guest_ea &= ea_mask;

        switch (ea_mask) {
        case ~0xfffUL:
                kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
                break;
        case 0x0ffff000:
                /* 32-bit flush w/o segment, go through all possible segments */
                for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL)
                        kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL);
                break;
        case 0:
                /* Doing a complete flush -> start from scratch */
                kvmppc_mmu_pte_flush_all(vcpu);
                break;
        default:
                WARN_ON(1);
                break;
        }
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct hlist_head *list;
        struct hlist_node *node, *tmp;
        struct hpte_cache *pte;
        u64 vp_mask = 0xfffffffffULL;

        list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct hlist_head *list;
        struct hlist_node *node, *tmp;
        struct hpte_cache *pte;
        u64 vp_mask = 0xffffff000ULL;

        list = &vcpu->arch.hpte_hash_vpte_long[
                kvmppc_mmu_hash_vpte_long(guest_vp)];

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);
}

void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
        dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
                    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
        guest_vp &= vp_mask;

        switch (vp_mask) {
        case 0xfffffffffULL:
                kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
                break;
        case 0xffffff000ULL:
                kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
                break;
        default:
                WARN_ON(1);
                return;
        }
}

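/*
 * Flush shadow PTEs by guest real address range. There is no hash keyed
 * on real addresses, so walk every vPTE_long bucket and compare raddr.
 */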
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
        struct hlist_node *node, *tmp;
        struct hpte_cache *pte;
        int i;

        dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
                    vcpu->arch.hpte_cache_count, pa_start, pa_end);

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

                hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
                        if ((pte->pte.raddr >= pa_start) &&
                            (pte->pte.raddr < pa_end))
                                invalidate_pte(vcpu, pte);
        }
}

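/*
 * Hand out the next (zeroed) cache entry. If that fills the per-vcpu
 * cache, flush everything; the new entry is not hashed in yet, so it
 * survives the flush and the caller still has room to map it.
 */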
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
        struct hpte_cache *pte;

        pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
        vcpu->arch.hpte_cache_count++;

        if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
                kvmppc_mmu_pte_flush_all(vcpu);

        return pte;
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
        int i;

        for (i = 0; i < len; i++)
                INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
        /* init hpte lookup hashes */
        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
                                  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
                                  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
                                  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));

        return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
        /* init hpte slab cache */
        hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
                                       sizeof(struct hpte_cache), 0, NULL);

        return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
        kmem_cache_destroy(hpte_cache);
}
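
For orientation, this is roughly how a MMU backend would drive the API above; example_map_guest_page() and the fault-handling details are hypothetical, only the kvmppc_mmu_hpte_* calls and the hpte_cache fields come from this file:

/* Hypothetical backend fault path (illustrative, not part of the patch) */
static int example_map_guest_page(struct kvm_vcpu *vcpu, ulong eaddr, u64 vpage)
{
        struct hpte_cache *pte;

        /* Get a zeroed entry; a full cache triggers a complete flush first */
        pte = kvmppc_mmu_hpte_cache_next(vcpu);

        pte->pte.eaddr = eaddr & ~0xfffUL;
        pte->pte.vpage = vpage;
        /* ... fill pte->pfn, pte->pte.may_write, install the host HPTE ... */

        /* Hash the entry into all three lookup lists */
        kvmppc_mmu_hpte_cache_map(vcpu, pte);

        return 0;
}

A guest tlbie for that page would then come back in through kvmppc_mmu_pte_flush(vcpu, eaddr, ~0xfffUL) and only walk the one hash bucket the entry lives in, which is the point of the whole patch.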