Diffstat (limited to 'arch/powerpc/kvm/44x_tlb.c')
 arch/powerpc/kvm/44x_tlb.c | 53 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5a5602da5091..2e227a412bc2 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -19,6 +19,7 @@
 
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/highmem.h>
 #include <asm/mmu-44x.h>
@@ -109,7 +110,6 @@ static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
 	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
 }
 
-/* Must be called with mmap_sem locked for writing. */
 static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                       unsigned int index)
 {
@@ -124,6 +124,11 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 	}
 }
 
+void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
+{
+	vcpu->arch.shadow_tlb_mod[i] = 1;
+}
+
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
  * the shadow TLB. */
 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
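
The kvmppc_tlbe_set_modified() helper added here marks one shadow TLB slot as dirty, presumably so a later exit path can rewrite only the slots that changed instead of reloading the whole hardware TLB. A minimal sketch of such a consumer, assuming the shadow_tlb_mod array introduced by this diff and a hypothetical write_host_tlbe() helper:

/*
 * Sketch only: consuming the dirty flags set by kvmppc_tlbe_set_modified().
 * kvmppc_44x_flush_modified() and write_host_tlbe() are illustrative names,
 * not code from this patch.
 */
static void kvmppc_44x_flush_modified(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		if (!vcpu->arch.shadow_tlb_mod[i])
			continue;	/* slot unchanged since last exit */

		/* Rewrite only this slot into the hardware TLB (a tlbwe
		 * sequence on real 440 hardware). */
		write_host_tlbe(i, &vcpu->arch.shadow_tlb[i]);
		vcpu->arch.shadow_tlb_mod[i] = 0;
	}
}
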
@@ -142,19 +147,16 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 	stlbe = &vcpu->arch.shadow_tlb[victim];
 
 	/* Get reference to new page. */
-	down_read(&current->mm->mmap_sem);
 	new_page = gfn_to_page(vcpu->kvm, gfn);
 	if (is_error_page(new_page)) {
 		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
 		kvm_release_page_clean(new_page);
-		up_read(&current->mm->mmap_sem);
 		return;
 	}
 	hpaddr = page_to_phys(new_page);
 
 	/* Drop reference to old page. */
 	kvmppc_44x_shadow_release(vcpu, victim);
-	up_read(&current->mm->mmap_sem);
 
 	vcpu->arch.shadow_pages[victim] = new_page;
 
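
This hunk, together with the down_write()/up_write() removals below, drops the mmap_sem bracketing around gfn_to_page(); the likely rationale (an assumption, since the commit message is not part of this page) is that the KVM core now takes whatever mm locks it needs internally. The reference taken here is paired with a release in kvmppc_44x_shadow_release(), whose body is not shown in this diff; a sketch modeled on it, using only calls visible elsewhere in the file:

/*
 * Sketch only: the release side pairing with the gfn_to_page() reference
 * above. Writable mappings may have been dirtied by the guest, so their
 * pages are released dirty.
 */
static void shadow_release_sketch(struct kvm_vcpu *vcpu, unsigned int index)
{
	struct page *page = vcpu->arch.shadow_pages[index];

	if (!page)
		return;

	if (kvmppc_44x_tlbe_is_writable(&vcpu->arch.shadow_tlb[index]))
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);

	vcpu->arch.shadow_pages[index] = NULL;
}
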
@@ -164,27 +166,30 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 
 	/* XXX what about AS? */
 
-	stlbe->tid = asid & 0xff;
+	stlbe->tid = !(asid & 0xff);
 
 	/* Force TS=1 for all guest mappings. */
 	/* For now we hardcode 4KB mappings, but it will be important to
 	 * use host large pages in the future. */
 	stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
 	               | PPC44x_TLB_4K;
-
 	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
 	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
 	                                            vcpu->arch.msr & MSR_PR);
+	kvmppc_tlbe_set_modified(vcpu, victim);
+
+	KVMTRACE_5D(STLB_WRITE, vcpu, victim,
+		    stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
+		    handler);
 }
 
 void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                            gva_t eend, u32 asid)
 {
-	unsigned int pid = asid & 0xff;
+	unsigned int pid = !(asid & 0xff);
 	int i;
 
 	/* XXX Replace loop with fancy data structures. */
-	down_write(&current->mm->mmap_sem);
 	for (i = 0; i <= tlb_44x_hwater; i++) {
 		struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
 		unsigned int tid;
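
Two details in this hunk are easy to misread. First, !(asid & 0xff) is a logical inversion, not a mask: a guest ASID of 0 yields shadow TID 1, and any nonzero guest ASID yields TID 0, which appears to pair with the shadow_pid = !usermode assignment at the end of the patch. Second, word1 packs a 36-bit host physical address into one 32-bit word using the 440 TLB layout (RPN in the high bits, 4-bit ERPN in the low nibble). A worked example, illustration only:

/* Encoding a 36-bit host physical address as done by kvmppc_mmu_map(). */
u64 hpaddr = 0x234567000ULL;		/* 36-bit physaddr, ERPN = 0x2 */
u32 word1 = (hpaddr & 0xfffffc00)	/* RPN: physaddr bits 31..10 */
	  | ((hpaddr >> 32) & 0xf);	/* ERPN: physaddr bits 35..32 */
/* word1 == 0x34567002 */
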
@@ -204,21 +209,35 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 		kvmppc_44x_shadow_release(vcpu, i);
 		stlbe->word0 = 0;
+		kvmppc_tlbe_set_modified(vcpu, i);
+		KVMTRACE_5D(STLB_INVAL, vcpu, i,
+			    stlbe->tid, stlbe->word0, stlbe->word1,
+			    stlbe->word2, handler);
 	}
-	up_write(&current->mm->mmap_sem);
 }
 
-/* Invalidate all mappings, so that when they fault back in they will get the
- * proper permission bits. */
+/* Invalidate all mappings on the privilege switch after PID has been changed.
+ * The guest always runs with PID=1, so we must clear the entire TLB when
+ * switching address spaces. */
 void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 {
 	int i;
 
-	/* XXX Replace loop with fancy data structures. */
-	down_write(&current->mm->mmap_sem);
-	for (i = 0; i <= tlb_44x_hwater; i++) {
-		kvmppc_44x_shadow_release(vcpu, i);
-		vcpu->arch.shadow_tlb[i].word0 = 0;
+	if (vcpu->arch.swap_pid) {
+		/* XXX Replace loop with fancy data structures. */
+		for (i = 0; i <= tlb_44x_hwater; i++) {
+			struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+
+			/* Future optimization: clear only userspace mappings. */
+			kvmppc_44x_shadow_release(vcpu, i);
+			stlbe->word0 = 0;
+			kvmppc_tlbe_set_modified(vcpu, i);
+			KVMTRACE_5D(STLB_INVAL, vcpu, i,
+				    stlbe->tid, stlbe->word0, stlbe->word1,
+				    stlbe->word2, handler);
+		}
+		vcpu->arch.swap_pid = 0;
 	}
-	up_write(&current->mm->mmap_sem);
+
+	vcpu->arch.shadow_pid = !usermode;
 }
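
The rewritten kvmppc_mmu_priv_switch() no longer flushes on every privilege switch: it clears the shadow TLB only when vcpu->arch.swap_pid is set, then retags the vcpu with shadow_pid = !usermode. The producer of swap_pid is outside this diff; a plausible sketch, assuming the flag is set when emulation of a guest PID update actually changes the value (names illustrative):

/*
 * Sketch only: a deferred-flush producer for the swap_pid flag consumed
 * by kvmppc_mmu_priv_switch() above. Not code from this patch.
 */
static void kvmppc_set_pid_sketch(struct kvm_vcpu *vcpu, u32 new_pid)
{
	if (vcpu->arch.pid == new_pid)
		return;			/* no change: keep shadow TLB */

	vcpu->arch.pid = new_pid;
	vcpu->arch.swap_pid = 1;	/* request flush at next priv switch */
}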