Diffstat (limited to 'arch/powerpc/kvm/44x_tlb.c')
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 55 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ee2461860bcf..d49dc66ab3c3 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -28,6 +28,13 @@
 
 #include "44x_tlb.h"
 
+#ifndef PPC44x_TLBE_SIZE
+#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
+#endif
+
+#define PAGE_SIZE_4K (1<<12)
+#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))
+
 #define PPC44x_TLB_UATTR_MASK \
 	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
 #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
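For illustration, a minimal standalone sketch (not from this patch) of what the new 4K constants evaluate to; the values mirror PAGE_SIZE_4K and PAGE_MASK_4K above, and PPC44x_TLBE_SIZE only defaults to a 4K shadow entry when the platform has not already defined it:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t page_size_4k = 1 << 12;             /* 0x1000, matches PAGE_SIZE_4K */
		uint32_t page_mask_4k = ~(page_size_4k - 1); /* 0xfffff000, matches PAGE_MASK_4K */

		/* Masking keeps the 4K frame bits and drops the in-page offset. */
		assert((0x12345678u & page_mask_4k) == 0x12345000u);
		return 0;
	}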
@@ -179,15 +186,26 @@ void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
 	vcpu_44x->shadow_tlb_mod[i] = 1;
 }
 
-/* Caller must ensure that the specified guest TLB entry is safe to insert into
- * the shadow TLB. */
-void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
-                    u32 flags)
+/**
+ * kvmppc_mmu_map -- create a host mapping for guest memory
+ *
+ * If the guest wanted a larger page than the host supports, only the first
+ * host page is mapped here and the rest are demand faulted.
+ *
+ * If the guest wanted a smaller page than the host page size, we map only the
+ * guest-size page (i.e. not a full host page mapping).
+ *
+ * Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB.
+ */
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
+                    u32 flags, u32 max_bytes)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	struct page *new_page;
 	struct kvmppc_44x_tlbe *stlbe;
 	hpa_t hpaddr;
+	gfn_t gfn;
 	unsigned int victim;
 
 	/* Future optimization: don't overwrite the TLB entry containing the
@@ -198,6 +216,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 	stlbe = &vcpu_44x->shadow_tlb[victim];
 
 	/* Get reference to new page. */
+	gfn = gpaddr >> PAGE_SHIFT;
 	new_page = gfn_to_page(vcpu->kvm, gfn);
 	if (is_error_page(new_page)) {
 		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
@@ -220,10 +239,25 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 	stlbe->tid = !(asid & 0xff);
 
 	/* Force TS=1 for all guest mappings. */
-	/* For now we hardcode 4KB mappings, but it will be important to
-	 * use host large pages in the future. */
-	stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
-	               | PPC44x_TLB_4K;
+	stlbe->word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
+
+	if (max_bytes >= PAGE_SIZE) {
+		/* Guest mapping is larger than or equal to host page size. We can use
+		 * a "native" host mapping. */
+		stlbe->word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
+	} else {
+		/* Guest mapping is smaller than host page size. We must restrict the
+		 * size of the mapping to be at most the smaller of the two, but for
+		 * simplicity we fall back to a 4K mapping (this is probably what the
+		 * guest is using anyway). */
+		stlbe->word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
+
+		/* 'hpaddr' is a host page, which is larger than the mapping we're
+		 * inserting here. To compensate, we must add the in-page offset to the
+		 * sub-page. */
+		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
+	}
+
 	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
 	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
 	                                            vcpu->arch.msr & MSR_PR);
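The sub-page fixup is the subtle part of this hunk: gfn_to_page() can only hand back a whole host page, so when the host page is larger than the 4K shadow mapping, the bits selected by PAGE_MASK ^ PAGE_MASK_4K pick out which 4K slice of the host page the guest address refers to. A standalone sketch of the same arithmetic, assuming 64K host pages (the addresses are illustrative, not from the patch):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t page_mask    = ~(uint64_t)(0x10000 - 1); /* assumed 64K host PAGE_MASK */
		uint64_t page_mask_4k = ~(uint64_t)(0x1000 - 1);  /* 4K mapping mask */

		/* XOR of the two masks selects exactly the sub-page index bits
		 * (bits 12..15 for 64K host pages). */
		uint64_t subpage_bits = page_mask ^ page_mask_4k;
		assert(subpage_bits == 0xf000);

		uint64_t hpaddr = 0x40000000; /* 64K-aligned host page base (illustrative) */
		uint64_t gpaddr = 0x00c03000; /* guest physical address (illustrative) */

		/* OR in the guest's sub-page bits so word1 ends up pointing at
		 * the right 4K slice of the host page. */
		hpaddr |= gpaddr & subpage_bits;
		assert(hpaddr == 0x40003000);
		return 0;
	}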
@@ -322,10 +356,8 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
-	u64 eaddr;
-	u64 raddr;
+	gva_t eaddr;
 	u64 asid;
-	u32 flags;
 	struct kvmppc_44x_tlbe *tlbe;
 	unsigned int index;
 
@@ -364,15 +396,22 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	}
 
 	if (tlbe_is_host_safe(vcpu, tlbe)) {
+		gpa_t gpaddr;
+		u32 flags;
+		u32 bytes;
+
 		eaddr = get_tlb_eaddr(tlbe);
-		raddr = get_tlb_raddr(tlbe);
+		gpaddr = get_tlb_raddr(tlbe);
+
+		/* Use the advertised page size to mask effective and real addrs. */
+		bytes = get_tlb_bytes(tlbe);
+		eaddr &= ~(bytes - 1);
+		gpaddr &= ~(bytes - 1);
+
 		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
 		flags = tlbe->word2 & 0xffff;
 
-		/* Create a 4KB mapping on the host. If the guest wanted a
-		 * large page, only the first 4KB is mapped here and the rest
-		 * are mapped on the fly. */
-		kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
+		kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes);
 	}
 
 	KVMTRACE_5D(GTLB_WRITE, vcpu, index,
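The masking added to the tlbwe path works because the page size encoded in a 440 TLB entry is always a power of two, so ~(bytes - 1) is an exact alignment mask for whatever size the guest advertised. A short sketch of that arithmetic (values illustrative, not from the patch):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t bytes = 0x10000;    /* e.g. a 64K guest page */
		uint64_t eaddr = 0x12345678; /* unaligned effective address */

		/* Power-of-two size => ~(bytes - 1) clears the in-page offset,
		 * aligning the address down to the advertised page boundary. */
		eaddr &= ~(uint64_t)(bytes - 1);
		assert(eaddr == 0x12340000);
		return 0;
	}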