commit 5359c32eb7402124abc9964d5d53639fe0739cea
Merge: 8916edef5888c5d8fe283714416a9ca95b4c3431 fe0bdec68b77020281dc814805edfe594ae89e0f
Author:    Ingo Molnar <mingo@elte.hu>  2009-01-05 07:53:39 -0500
Committer: Ingo Molnar <mingo@elte.hu>  2009-01-05 07:53:39 -0500
Tree:      d77b6967fe8420678bb9d1d936855ac0699c196a

    Merge branch 'linus' into sched/urgent
Diffstat (limited to 'arch/powerpc/kvm/44x_tlb.c')

 arch/powerpc/kvm/44x_tlb.c | 463 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 356 insertions(+), 107 deletions(-)
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ad72c6f9811f..9a34b8edb9e2 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -22,20 +22,103 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/highmem.h>
+
+#include <asm/tlbflush.h>
 #include <asm/mmu-44x.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_44x.h>
+#include "timing.h"
 
 #include "44x_tlb.h"
 
+#ifndef PPC44x_TLBE_SIZE
+#define PPC44x_TLBE_SIZE        PPC44x_TLB_4K
+#endif
+
+#define PAGE_SIZE_4K (1<<12)
+#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))
+
+#define PPC44x_TLB_UATTR_MASK \
+        (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
 #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
 #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
 
-static unsigned int kvmppc_tlb_44x_pos;
+#ifdef DEBUG
+void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
+{
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+        struct kvmppc_44x_tlbe *tlbe;
+        int i;
+
+        printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
+        printk("| %2s | %3s | %8s | %8s | %8s |\n",
+                        "nr", "tid", "word0", "word1", "word2");
+
+        for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
+                tlbe = &vcpu_44x->guest_tlb[i];
+                if (tlbe->word0 & PPC44x_TLB_VALID)
+                        printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
+                               i, tlbe->tid, tlbe->word0, tlbe->word1,
+                               tlbe->word2);
+        }
+}
+#endif
+
+static inline void kvmppc_44x_tlbie(unsigned int index)
+{
+        /* 0 <= index < 64, so the V bit is clear and we can use the index as
+         * word0. */
+        asm volatile(
+                "tlbwe %[index], %[index], 0\n"
+        :
+        : [index] "r"(index)
+        );
+}
+
+static inline void kvmppc_44x_tlbre(unsigned int index,
+                                    struct kvmppc_44x_tlbe *tlbe)
+{
+        asm volatile(
+                "tlbre %[word0], %[index], 0\n"
+                "mfspr %[tid], %[sprn_mmucr]\n"
+                "andi. %[tid], %[tid], 0xff\n"
+                "tlbre %[word1], %[index], 1\n"
+                "tlbre %[word2], %[index], 2\n"
+        : [word0] "=r"(tlbe->word0),
+          [word1] "=r"(tlbe->word1),
+          [word2] "=r"(tlbe->word2),
+          [tid]   "=r"(tlbe->tid)
+        : [index] "r"(index),
+          [sprn_mmucr] "i"(SPRN_MMUCR)
+        : "cc"
+        );
+}
+
+static inline void kvmppc_44x_tlbwe(unsigned int index,
+                                    struct kvmppc_44x_tlbe *stlbe)
+{
+        unsigned long tmp;
+
+        asm volatile(
+                "mfspr %[tmp], %[sprn_mmucr]\n"
+                "rlwimi %[tmp], %[tid], 0, 0xff\n"
+                "mtspr %[sprn_mmucr], %[tmp]\n"
+                "tlbwe %[word0], %[index], 0\n"
+                "tlbwe %[word1], %[index], 1\n"
+                "tlbwe %[word2], %[index], 2\n"
+        : [tmp]   "=&r"(tmp)
+        : [word0] "r"(stlbe->word0),
+          [word1] "r"(stlbe->word1),
+          [word2] "r"(stlbe->word2),
+          [tid]   "r"(stlbe->tid),
+          [index] "r"(index),
+          [sprn_mmucr] "i"(SPRN_MMUCR)
+        );
+}
 
 static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
 {
-        /* Mask off reserved bits. */
-        attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;
+        /* We only care about the guest's permission and user bits. */
+        attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;
 
         if (!usermode) {
                 /* Guest is in supervisor mode, so we need to translate guest
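An aside on the kvmppc_44x_tlbie() helper added above: it exploits a layout detail of word0. A hardware TLB index is at most 63, so it can never carry the valid bit, and writing the index back as word0 leaves the entry invalid. A minimal user-space check of that invariant, assuming the PPC44x_TLB_VALID value from asm/mmu-44x.h:

```c
#include <assert.h>

#define PPC44x_TLB_VALID 0x00000200u /* assumed value, from asm/mmu-44x.h */

int main(void)
{
	unsigned int index;

	/* Any 440 TLB index fits in 6 bits, so the V bit is always clear:
	 * "tlbwe index, index, 0" therefore invalidates the entry. */
	for (index = 0; index < 64; index++)
		assert((index & PPC44x_TLB_VALID) == 0);
	return 0;
}
```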
@@ -47,18 +130,60 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
         /* Make sure host can always access this memory. */
         attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
 
+        /* WIMGE = 0b00100 */
+        attrib |= PPC44x_TLB_M;
+
         return attrib;
 }
 
+/* Load shadow TLB back into hardware. */
+void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
+{
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+        int i;
+
+        for (i = 0; i <= tlb_44x_hwater; i++) {
+                struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+
+                if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
+                        kvmppc_44x_tlbwe(i, stlbe);
+        }
+}
+
+static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
+                                         unsigned int i)
+{
+        vcpu_44x->shadow_tlb_mod[i] = 1;
+}
+
+/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
+void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
+{
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+        int i;
+
+        for (i = 0; i <= tlb_44x_hwater; i++) {
+                struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+
+                if (vcpu_44x->shadow_tlb_mod[i])
+                        kvmppc_44x_tlbre(i, stlbe);
+
+                if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
+                        kvmppc_44x_tlbie(i);
+        }
+}
+
+
 /* Search the guest TLB for a matching entry. */
 int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                          unsigned int as)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         int i;
 
         /* XXX Replace loop with fancy data structures. */
-        for (i = 0; i < PPC44x_TLB_SIZE; i++) {
-                struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
+        for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
+                struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
                 unsigned int tid;
 
                 if (eaddr < get_tlb_eaddr(tlbe))
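The load/put pair above amounts to a lazy context switch for the shadow TLB: on vcpu load, every valid TS=1 entry is replayed into hardware; on vcpu put, only entries flagged in shadow_tlb_mod[] are read back with the costly tlbre sequence, and all guest entries are then invalidated so the host never runs on guest translations. A user-space sketch of that protocol with the hardware accessors stubbed out (simplified types and bookkeeping, not the kernel's actual structures):

```c
#include <stdbool.h>
#include <string.h>

#define TLB_ENTRIES 64 /* assumption: one 440 unified TLB's worth of entries */

struct tlbe_model {
	unsigned int word0, word1, word2, tid;
	bool valid_ts1; /* models get_tlb_v() && get_tlb_ts() */
};

static struct tlbe_model hw_tlb[TLB_ENTRIES];     /* stand-in for the hardware TLB */
static struct tlbe_model shadow_tlb[TLB_ENTRIES]; /* the vcpu's saved copy */
static bool shadow_tlb_mod[TLB_ENTRIES];          /* written since the last put? */

/* vcpu_load path: replay only guest (TS=1) entries into "hardware". */
static void tlb_load_model(void)
{
	int i;

	for (i = 0; i < TLB_ENTRIES; i++)
		if (shadow_tlb[i].valid_ts1)
			hw_tlb[i] = shadow_tlb[i]; /* models kvmppc_44x_tlbwe() */
}

/* vcpu_put path: read back only modified entries, then invalidate guest ones. */
static void tlb_put_model(void)
{
	int i;

	for (i = 0; i < TLB_ENTRIES; i++) {
		if (shadow_tlb_mod[i])
			shadow_tlb[i] = hw_tlb[i]; /* models kvmppc_44x_tlbre() */

		if (shadow_tlb[i].valid_ts1)
			memset(&hw_tlb[i], 0, sizeof(hw_tlb[i])); /* models tlbie */
	}
}

int main(void)
{
	shadow_tlb[3].valid_ts1 = true;
	tlb_load_model();
	shadow_tlb_mod[3] = true; /* as kvmppc_44x_tlbe_set_modified() would do */
	tlb_put_model();
	return 0;
}
```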
@@ -83,78 +208,89 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
         return -1;
 }
 
-struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
         unsigned int as = !!(vcpu->arch.msr & MSR_IS);
-        unsigned int index;
 
-        index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
-        if (index == -1)
-                return NULL;
-        return &vcpu->arch.guest_tlb[index];
+        return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
-struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
         unsigned int as = !!(vcpu->arch.msr & MSR_DS);
-        unsigned int index;
 
-        index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
-        if (index == -1)
-                return NULL;
-        return &vcpu->arch.guest_tlb[index];
+        return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
-static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
+static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
+                                      unsigned int stlb_index)
 {
-        return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
-}
+        struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];
 
-static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
-                                      unsigned int index)
-{
-        struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
-        struct page *page = vcpu->arch.shadow_pages[index];
+        if (!ref->page)
+                return;
 
-        if (get_tlb_v(stlbe)) {
-                if (kvmppc_44x_tlbe_is_writable(stlbe))
-                        kvm_release_page_dirty(page);
-                else
-                        kvm_release_page_clean(page);
-        }
+        /* Discard from the TLB. */
+        /* Note: we could actually invalidate a host mapping, if the host overwrote
+         * this TLB entry since we inserted a guest mapping. */
+        kvmppc_44x_tlbie(stlb_index);
+
+        /* Now release the page. */
+        if (ref->writeable)
+                kvm_release_page_dirty(ref->page);
+        else
+                kvm_release_page_clean(ref->page);
+
+        ref->page = NULL;
+
+        /* XXX set tlb_44x_index to stlb_index? */
+
+        KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler);
 }
 
 void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         int i;
 
         for (i = 0; i <= tlb_44x_hwater; i++)
-                kvmppc_44x_shadow_release(vcpu, i);
-}
-
-void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
-{
-        vcpu->arch.shadow_tlb_mod[i] = 1;
+                kvmppc_44x_shadow_release(vcpu_44x, i);
 }
 
-/* Caller must ensure that the specified guest TLB entry is safe to insert into
- * the shadow TLB. */
-void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
-                    u32 flags)
+/**
+ * kvmppc_mmu_map -- create a host mapping for guest memory
+ *
+ * If the guest wanted a larger page than the host supports, only the first
+ * host page is mapped here and the rest are demand faulted.
+ *
+ * If the guest wanted a smaller page than the host page size, we map only the
+ * guest-size page (i.e. not a full host page mapping).
+ *
+ * Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB.
+ */
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
+                    u32 flags, u32 max_bytes, unsigned int gtlb_index)
 {
+        struct kvmppc_44x_tlbe stlbe;
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+        struct kvmppc_44x_shadow_ref *ref;
         struct page *new_page;
-        struct tlbe *stlbe;
         hpa_t hpaddr;
+        gfn_t gfn;
         unsigned int victim;
 
-        /* Future optimization: don't overwrite the TLB entry containing the
-         * current PC (or stack?). */
-        victim = kvmppc_tlb_44x_pos++;
-        if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
-                kvmppc_tlb_44x_pos = 0;
-        stlbe = &vcpu->arch.shadow_tlb[victim];
+        /* Select TLB entry to clobber. Indirectly guard against races with the TLB
+         * miss handler by disabling interrupts. */
+        local_irq_disable();
+        victim = ++tlb_44x_index;
+        if (victim > tlb_44x_hwater)
+                victim = 0;
+        tlb_44x_index = victim;
+        local_irq_enable();
 
         /* Get reference to new page. */
+        gfn = gpaddr >> PAGE_SHIFT;
         new_page = gfn_to_page(vcpu->kvm, gfn);
         if (is_error_page(new_page)) {
                 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
@@ -163,10 +299,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
         }
         hpaddr = page_to_phys(new_page);
 
-        /* Drop reference to old page. */
-        kvmppc_44x_shadow_release(vcpu, victim);
-
-        vcpu->arch.shadow_pages[victim] = new_page;
+        /* Invalidate any previous shadow mappings. */
+        kvmppc_44x_shadow_release(vcpu_44x, victim);
 
         /* XXX Make sure (va, size) doesn't overlap any other
          * entries. 440x6 user manual says the result would be
@@ -174,78 +308,193 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 
         /* XXX what about AS? */
 
-        stlbe->tid = !(asid & 0xff);
-
         /* Force TS=1 for all guest mappings. */
-        /* For now we hardcode 4KB mappings, but it will be important to
-         * use host large pages in the future. */
-        stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
-                       | PPC44x_TLB_4K;
-        stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
-        stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
-                                                    vcpu->arch.msr & MSR_PR);
-        kvmppc_tlbe_set_modified(vcpu, victim);
+        stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
+
+        if (max_bytes >= PAGE_SIZE) {
+                /* Guest mapping is larger than or equal to host page size. We can use
+                 * a "native" host mapping. */
+                stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
+        } else {
+                /* Guest mapping is smaller than host page size. We must restrict the
+                 * size of the mapping to be at most the smaller of the two, but for
+                 * simplicity we fall back to a 4K mapping (this is probably what the
+                 * guest is using anyways). */
+                stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
+
+                /* 'hpaddr' is a host page, which is larger than the mapping we're
+                 * inserting here. To compensate, we must add the in-page offset to the
+                 * sub-page. */
+                hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
+        }
 
-        KVMTRACE_5D(STLB_WRITE, vcpu, victim,
-                    stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
-                    handler);
+        stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
+        stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
+                                                   vcpu->arch.msr & MSR_PR);
+        stlbe.tid = !(asid & 0xff);
+
+        /* Keep track of the reference so we can properly release it later. */
+        ref = &vcpu_44x->shadow_refs[victim];
+        ref->page = new_page;
+        ref->gtlb_index = gtlb_index;
+        ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
+        ref->tid = stlbe.tid;
+
+        /* Insert shadow mapping into hardware TLB. */
+        kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
+        kvmppc_44x_tlbwe(victim, &stlbe);
+        KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
+                    stlbe.word2, handler);
 }
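The sub-page fixup near the end of kvmppc_mmu_map() deserves a worked example. Suppose the host uses 64K pages while the guest maps 4K: word0 takes the 4K-aligned effective address, and hpaddr, which points at the 64K host page, must be advanced to the correct 4K sub-page. PAGE_MASK ^ PAGE_MASK_4K isolates exactly the bits that select a 4K sub-page within a 64K page. A self-contained check with made-up addresses (the 64K host PAGE_SIZE is an assumption for the example):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t page_mask    = 0xffff0000; /* 64K host pages (assumed) */
	const uint32_t page_mask_4k = 0xfffff000;

	uint32_t gpaddr = 0x12345000; /* guest physical address (made up) */
	uint32_t hpaddr = 0x87650000; /* host page backing it, 64K aligned */

	/* page_mask ^ page_mask_4k == 0x0000f000: the 4K sub-page index bits. */
	hpaddr |= gpaddr & (page_mask ^ page_mask_4k);
	assert(hpaddr == 0x87655000); /* now points at the right 4K sub-page */
	return 0;
}
```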
 
-void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
-                           gva_t eend, u32 asid)
+/* For a particular guest TLB entry, invalidate the corresponding host TLB
+ * mappings and release the host pages. */
+static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
+                                  unsigned int gtlb_index)
 {
-        unsigned int pid = !(asid & 0xff);
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         int i;
 
-        /* XXX Replace loop with fancy data structures. */
-        for (i = 0; i <= tlb_44x_hwater; i++) {
-                struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
-                unsigned int tid;
+        for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
+                struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
+                if (ref->gtlb_index == gtlb_index)
+                        kvmppc_44x_shadow_release(vcpu_44x, i);
+        }
+}
 
-                if (!get_tlb_v(stlbe))
-                        continue;
+void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+{
+        vcpu->arch.shadow_pid = !usermode;
+}
 
-                if (eend < get_tlb_eaddr(stlbe))
-                        continue;
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
+{
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+        int i;
 
-                if (eaddr > get_tlb_end(stlbe))
-                        continue;
+        if (unlikely(vcpu->arch.pid == new_pid))
+                return;
 
-                tid = get_tlb_tid(stlbe);
-                if (tid && (tid != pid))
-                        continue;
+        vcpu->arch.pid = new_pid;
 
-                kvmppc_44x_shadow_release(vcpu, i);
-                stlbe->word0 = 0;
-                kvmppc_tlbe_set_modified(vcpu, i);
-                KVMTRACE_5D(STLB_INVAL, vcpu, i,
-                            stlbe->tid, stlbe->word0, stlbe->word1,
-                            stlbe->word2, handler);
+        /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
+         * can't access guest kernel mappings (TID=1). When we switch to a new
+         * guest PID, which will also use host PID=0, we must discard the old guest
+         * userspace mappings. */
+        for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
+                struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
+
+                if (ref->tid == 0)
+                        kvmppc_44x_shadow_release(vcpu_44x, i);
         }
 }
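A note on the TID scheme used here and in kvmppc_mmu_map() above. Shadow entries always run under host PID 0 (guest user) or 1 (guest kernel, via vcpu->arch.shadow_pid), and the shadow TID is derived as the logical negation of the guest TID: guest-global entries (TID=0, typically guest kernel mappings) become shadow TID 1, visible only while the guest runs privileged, while guest per-process entries become shadow TID 0, which matches any PID and therefore must be flushed on every guest PID switch. A sketch of that derivation (illustrative helper, not kernel code):

```c
/* Shadow TID derivation, mirroring "stlbe.tid = !(asid & 0xff)" above. */
static unsigned int shadow_tid(unsigned int guest_tid)
{
	/* guest TID == 0 (global/kernel mapping) -> shadow TID 1: only matches
	 * while the host PID register holds 1, i.e. guest supervisor mode.
	 * guest TID != 0 (per-process mapping)   -> shadow TID 0: matches any
	 * PID, hence kvmppc_set_pid() must flush all TID==0 shadow entries. */
	return !(guest_tid & 0xff);
}
```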
 
-/* Invalidate all mappings on the privilege switch after PID has been changed.
- * The guest always runs with PID=1, so we must clear the entire TLB when
- * switching address spaces. */
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+                             const struct kvmppc_44x_tlbe *tlbe)
 {
-        int i;
+        gpa_t gpa;
 
-        if (vcpu->arch.swap_pid) {
-                /* XXX Replace loop with fancy data structures. */
-                for (i = 0; i <= tlb_44x_hwater; i++) {
-                        struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+        if (!get_tlb_v(tlbe))
+                return 0;
 
-                        /* Future optimization: clear only userspace mappings. */
-                        kvmppc_44x_shadow_release(vcpu, i);
-                        stlbe->word0 = 0;
-                        kvmppc_tlbe_set_modified(vcpu, i);
-                        KVMTRACE_5D(STLB_INVAL, vcpu, i,
-                                    stlbe->tid, stlbe->word0, stlbe->word1,
-                                    stlbe->word2, handler);
-                }
-                vcpu->arch.swap_pid = 0;
+        /* Does it match current guest AS? */
+        /* XXX what about IS != DS? */
+        if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+                return 0;
+
+        gpa = get_tlb_raddr(tlbe);
+        if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+                /* Mapping is not for RAM. */
+                return 0;
+
+        return 1;
+}
+
+int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
+{
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+        struct kvmppc_44x_tlbe *tlbe;
+        unsigned int gtlb_index;
+
+        gtlb_index = vcpu->arch.gpr[ra];
+        if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
+                printk("%s: index %d\n", __func__, gtlb_index);
+                kvmppc_dump_vcpu(vcpu);
+                return EMULATE_FAIL;
         }
 
-        vcpu->arch.shadow_pid = !usermode;
+        tlbe = &vcpu_44x->guest_tlb[gtlb_index];
+
+        /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
+        if (tlbe->word0 & PPC44x_TLB_VALID)
+                kvmppc_44x_invalidate(vcpu, gtlb_index);
+
+        switch (ws) {
+        case PPC44x_TLB_PAGEID:
+                tlbe->tid = get_mmucr_stid(vcpu);
+                tlbe->word0 = vcpu->arch.gpr[rs];
+                break;
+
+        case PPC44x_TLB_XLAT:
+                tlbe->word1 = vcpu->arch.gpr[rs];
+                break;
+
+        case PPC44x_TLB_ATTRIB:
+                tlbe->word2 = vcpu->arch.gpr[rs];
+                break;
+
+        default:
+                return EMULATE_FAIL;
+        }
+
+        if (tlbe_is_host_safe(vcpu, tlbe)) {
+                u64 asid;
+                gva_t eaddr;
+                gpa_t gpaddr;
+                u32 flags;
+                u32 bytes;
+
+                eaddr = get_tlb_eaddr(tlbe);
+                gpaddr = get_tlb_raddr(tlbe);
+
+                /* Use the advertised page size to mask effective and real addrs. */
+                bytes = get_tlb_bytes(tlbe);
+                eaddr &= ~(bytes - 1);
+                gpaddr &= ~(bytes - 1);
+
+                asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+                flags = tlbe->word2 & 0xffff;
+
+                kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index);
+        }
+
+        KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
+                    tlbe->word1, tlbe->word2, handler);
+
+        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
+        return EMULATE_DONE;
+}
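For context, the ws ("word select") argument decoded above mirrors the 440's tlbwe instruction, which programs one 32-bit word of a TLB entry per execution. A guest typically issues three back-to-back tlbwe instructions to write a full entry, each of which traps into this emulation when the guest runs deprivileged. A guest-side sketch (inline asm for a 440 kernel; illustrative, not taken from the patch):

```c
/* Write all three words of guest TLB entry 'index'. Each tlbwe below is a
 * privileged instruction, so under KVM each one traps to
 * kvmppc_44x_emul_tlbwe() with ws = 0 (PAGEID), 1 (XLAT) or 2 (ATTRIB). */
static inline void guest_write_tlb_entry(unsigned int index, unsigned int word0,
                                         unsigned int word1, unsigned int word2)
{
	asm volatile(
		"tlbwe %[w0], %[idx], 0\n"
		"tlbwe %[w1], %[idx], 1\n"
		"tlbwe %[w2], %[idx], 2\n"
		"isync\n"
	:
	: [idx] "r"(index), [w0] "r"(word0), [w1] "r"(word1), [w2] "r"(word2)
	);
}
```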
+
+int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
+{
+        u32 ea;
+        int gtlb_index;
+        unsigned int as = get_mmucr_sts(vcpu);
+        unsigned int pid = get_mmucr_stid(vcpu);
+
+        ea = vcpu->arch.gpr[rb];
+        if (ra)
+                ea += vcpu->arch.gpr[ra];
+
+        gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
+        if (rc) {
+                if (gtlb_index < 0)
+                        vcpu->arch.cr &= ~0x20000000;
+                else
+                        vcpu->arch.cr |= 0x20000000;
+        }
+        vcpu->arch.gpr[rt] = gtlb_index;
+
+        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
+        return EMULATE_DONE;
 }
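Finally, the tlbsx emulation: the record form (tlbsx.) is expected to set CR0[EQ], whose mask in the 32-bit CR image is 0x20000000, when the search hits, which is exactly what the rc branch above implements. From the guest's point of view the probe looks roughly like this (440-specific sketch, not from the patch):

```c
/* Probe the TLB for an effective address. Returns nonzero on a hit and
 * stores the matching entry index; under KVM this traps to
 * kvmppc_44x_emul_tlbsx() with rc = 1 because of the record form. */
static inline int guest_tlb_probe(unsigned long ea, unsigned int *index)
{
	unsigned int idx, cr;

	asm volatile(
		"tlbsx. %[idx], 0, %[ea]\n"
		"mfcr   %[cr]\n"
	: [idx] "=&r"(idx), [cr] "=r"(cr)
	: [ea] "r"(ea)
	: "cc"
	);

	*index = idx;
	return (cr & 0x20000000) != 0; /* CR0[EQ] set on a hit */
}
```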
