Diffstat (limited to 'arch/powerpc/kvm/book3s_mmu_hpte.c')
-rw-r--r--	arch/powerpc/kvm/book3s_mmu_hpte.c	71
1 file changed, 42 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 79751d8dd131..41cb0017e757 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -21,7 +21,6 @@
 #include <linux/kvm_host.h>
 #include <linux/hash.h>
 #include <linux/slab.h>
-#include "trace.h"
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -29,6 +28,8 @@
 #include <asm/mmu_context.h>
 #include <asm/hw_irq.h>
 
+#include "trace.h"
+
 #define PTE_SIZE 12
 
 static struct kmem_cache *hpte_cache;
@@ -58,30 +59,31 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
 void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	u64 index;
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 
 	trace_kvm_book3s_mmu_map(pte);
 
-	spin_lock(&vcpu->arch.mmu_lock);
+	spin_lock(&vcpu3s->mmu_lock);
 
 	/* Add to ePTE list */
 	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
-	hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);
 
 	/* Add to ePTE_long list */
 	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
 	hlist_add_head_rcu(&pte->list_pte_long,
-			   &vcpu->arch.hpte_hash_pte_long[index]);
+			   &vcpu3s->hpte_hash_pte_long[index]);
 
 	/* Add to vPTE list */
 	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
-	hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);
 
 	/* Add to vPTE_long list */
 	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
 	hlist_add_head_rcu(&pte->list_vpte_long,
-			   &vcpu->arch.hpte_hash_vpte_long[index]);
+			   &vcpu3s->hpte_hash_vpte_long[index]);
 
-	spin_unlock(&vcpu->arch.mmu_lock);
+	spin_unlock(&vcpu3s->mmu_lock);
 }
 
 static void free_pte_rcu(struct rcu_head *head)
@@ -92,16 +94,18 @@ static void free_pte_rcu(struct rcu_head *head)
 
 static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
 	trace_kvm_book3s_mmu_invalidate(pte);
 
 	/* Different for 32 and 64 bit */
 	kvmppc_mmu_invalidate_pte(vcpu, pte);
 
-	spin_lock(&vcpu->arch.mmu_lock);
+	spin_lock(&vcpu3s->mmu_lock);
 
 	/* pte already invalidated in between? */
 	if (hlist_unhashed(&pte->list_pte)) {
-		spin_unlock(&vcpu->arch.mmu_lock);
+		spin_unlock(&vcpu3s->mmu_lock);
 		return;
 	}
 
@@ -115,14 +119,15 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 	else
 		kvm_release_pfn_clean(pte->pfn);
 
-	spin_unlock(&vcpu->arch.mmu_lock);
+	spin_unlock(&vcpu3s->mmu_lock);
 
-	vcpu->arch.hpte_cache_count--;
+	vcpu3s->hpte_cache_count--;
 	call_rcu(&pte->rcu_head, free_pte_rcu);
 }
 
 static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hpte_cache *pte;
 	struct hlist_node *node;
 	int i;
@@ -130,7 +135,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 	rcu_read_lock();
 
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
-		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
 		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
 			invalidate_pte(vcpu, pte);
@@ -141,12 +146,13 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 
 static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
-	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
+	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
 
 	rcu_read_lock();
 
@@ -160,12 +166,13 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 
 static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
-	list = &vcpu->arch.hpte_hash_pte_long[
+	list = &vcpu3s->hpte_hash_pte_long[
 			kvmppc_mmu_hash_pte_long(guest_ea)];
 
 	rcu_read_lock();
@@ -203,12 +210,13 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
 /* Flush with mask 0xfffffffff */
 static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xfffffffffULL;
 
-	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
+	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
 
 	rcu_read_lock();
 
@@ -223,12 +231,13 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 /* Flush with mask 0xffffff000 */
 static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xffffff000ULL;
 
-	list = &vcpu->arch.hpte_hash_vpte_long[
+	list = &vcpu3s->hpte_hash_vpte_long[
 		kvmppc_mmu_hash_vpte_long(guest_vp)];
 
 	rcu_read_lock();
@@ -261,6 +270,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
 
 void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 	int i;
@@ -270,7 +280,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 	rcu_read_lock();
 
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
-		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
 		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
 			if ((pte->pte.raddr >= pa_start) &&
@@ -283,12 +293,13 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 
 struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hpte_cache *pte;
 
 	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
-	vcpu->arch.hpte_cache_count++;
+	vcpu3s->hpte_cache_count++;
 
-	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
+	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
 		kvmppc_mmu_pte_flush_all(vcpu);
 
 	return pte;
@@ -309,17 +320,19 @@ static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
 
 int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
 	/* init hpte lookup hashes */
-	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
-				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
-	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
-				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
-	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
-				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
-	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
-				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
+	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
+				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
+	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
+				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
+	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
+				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
+	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
+				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
 
-	spin_lock_init(&vcpu->arch.mmu_lock);
+	spin_lock_init(&vcpu3s->mmu_lock);
 
 	return 0;
 }