author    Paul Mackerras <paulus@samba.org>  2011-12-12 07:32:27 -0500
committer Avi Kivity <avi@redhat.com>        2012-03-05 07:52:37 -0500
commit    9d0ef5ea043d1242897d15c71bd1a15da79b4a5d (patch)
tree      2847b3bd444b999f3c2a32d4cbb220d0e788e93a /arch/powerpc/kvm/book3s_64_mmu_hv.c
parent    da9d1d7f2875cc8c1ffbce8f3501d0b33f4e7a4d (diff)
KVM: PPC: Allow I/O mappings in memory slots
This provides for the case where userspace maps an I/O device into the address range of a memory slot using a VM_PFNMAP mapping. In that case, we work out the pfn from vma->vm_pgoff, and record the cache enable bits from vma->vm_page_prot in two low-order bits in the slot_phys array entries.

Then, in kvmppc_h_enter() we check that the cache bits in the HPTE that the guest wants to insert match the cache bits in the slot_phys array entry. However, we do allow the guest to create what it thinks is a non-cacheable or write-through mapping to memory that is actually cacheable, so that we can use normal system memory as part of an emulated device later on. In that case the actual HPTE we insert is a cacheable HPTE.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
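To illustrate the scheme described above, the following is a rough, self-contained sketch, not the kernel's code: it shows how a slot_phys entry could carry the pfn plus two low-order cache flags, and how an H_ENTER-time check against the guest HPTE's W/I bits would behave. The EX_* constant names and values here are invented for illustration; the real code uses the PowerPC HPTE write-through ("W") and cache-inhibited ("I") bits and the hpte_cache_bits() helper visible in the diff below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative bit layout for a slot_phys entry (an assumption, not the
 * kernel's exact encoding): the physical page number in the high bits,
 * plus two low-order flags standing in for the HPTE "I" (cache-inhibited)
 * and "W" (write-through) bits recorded from vma->vm_page_prot.
 */
#define EX_PAGE_SHIFT     12
#define EX_CACHE_INHIBIT  0x20UL	/* stands in for the HPTE I bit */
#define EX_WRITETHRU      0x40UL	/* stands in for the HPTE W bit */
#define EX_CACHE_MASK     (EX_CACHE_INHIBIT | EX_WRITETHRU)

/* Pack a pfn and its cache attributes the way the slot_phys array does. */
static uint64_t slot_phys_entry(uint64_t pfn, uint64_t cache_bits)
{
	return (pfn << EX_PAGE_SHIFT) | (cache_bits & EX_CACHE_MASK);
}

/*
 * Sketch of the check the commit message describes for kvmppc_h_enter():
 * for a real I/O page the guest HPTE's W/I bits must match what was
 * recorded for the slot; for ordinary RAM the guest may ask for a
 * non-cacheable or write-through mapping and still be given a cacheable one.
 */
static bool cache_bits_ok(uint64_t slot_entry, uint64_t guest_wi_bits)
{
	uint64_t slot_bits = slot_entry & EX_CACHE_MASK;

	if (slot_bits == 0)		/* ordinary RAM: any guest setting is accepted */
		return true;
	return guest_wi_bits == slot_bits;	/* real I/O: bits must match */
}

int main(void)
{
	uint64_t io  = slot_phys_entry(0x80000, EX_CACHE_INHIBIT);	/* PFNMAP I/O page */
	uint64_t ram = slot_phys_entry(0x1234, 0);			/* normal RAM page */

	printf("I/O page, guest maps cache-inhibited: %s\n",
	       cache_bits_ok(io, EX_CACHE_INHIBIT) ? "allowed" : "rejected");
	printf("I/O page, guest maps cacheable:       %s\n",
	       cache_bits_ok(io, 0) ? "allowed" : "rejected");
	printf("RAM page, guest maps cache-inhibited: %s\n",
	       cache_bits_ok(ram, EX_CACHE_INHIBIT) ? "allowed" : "rejected");
	return 0;
}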
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_hv.c')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  65
1 file changed, 44 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index cc18f3d67a57..b904c40a17bc 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -199,7 +199,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	struct page *page, *hpage, *pages[1];
 	unsigned long s, pgsize;
 	unsigned long *physp;
-	unsigned int got, pgorder;
+	unsigned int is_io, got, pgorder;
+	struct vm_area_struct *vma;
 	unsigned long pfn, i, npages;
 
 	physp = kvm->arch.slot_phys[memslot->id];
@@ -208,34 +209,51 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	if (physp[gfn - memslot->base_gfn])
 		return 0;
 
+	is_io = 0;
+	got = 0;
 	page = NULL;
 	pgsize = psize;
+	err = -EINVAL;
 	start = gfn_to_hva_memslot(memslot, gfn);
 
 	/* Instantiate and get the page we want access to */
 	np = get_user_pages_fast(start, 1, 1, pages);
-	if (np != 1)
-		return -EINVAL;
-	page = pages[0];
-	got = KVMPPC_GOT_PAGE;
+	if (np != 1) {
+		/* Look up the vma for the page */
+		down_read(&current->mm->mmap_sem);
+		vma = find_vma(current->mm, start);
+		if (!vma || vma->vm_start > start ||
+		    start + psize > vma->vm_end ||
+		    !(vma->vm_flags & VM_PFNMAP))
+			goto up_err;
+		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
+		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+		/* check alignment of pfn vs. requested page size */
+		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
+			goto up_err;
+		up_read(&current->mm->mmap_sem);
 
-	/* See if this is a large page */
-	s = PAGE_SIZE;
-	if (PageHuge(page)) {
-		hpage = compound_head(page);
-		s <<= compound_order(hpage);
-		/* Get the whole large page if slot alignment is ok */
-		if (s > psize && slot_is_aligned(memslot, s) &&
-		    !(memslot->userspace_addr & (s - 1))) {
-			start &= ~(s - 1);
-			pgsize = s;
-			page = hpage;
+	} else {
+		page = pages[0];
+		got = KVMPPC_GOT_PAGE;
+
+		/* See if this is a large page */
+		s = PAGE_SIZE;
+		if (PageHuge(page)) {
+			hpage = compound_head(page);
+			s <<= compound_order(hpage);
+			/* Get the whole large page if slot alignment is ok */
+			if (s > psize && slot_is_aligned(memslot, s) &&
+			    !(memslot->userspace_addr & (s - 1))) {
+				start &= ~(s - 1);
+				pgsize = s;
+				page = hpage;
+			}
 		}
+		if (s < psize)
+			goto out;
+		pfn = page_to_pfn(page);
 	}
-	err = -EINVAL;
-	if (s < psize)
-		goto out;
-	pfn = page_to_pfn(page);
 
 	npages = pgsize >> PAGE_SHIFT;
 	pgorder = __ilog2(npages);
@@ -243,7 +261,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	spin_lock(&kvm->arch.slot_phys_lock);
 	for (i = 0; i < npages; ++i) {
 		if (!physp[i]) {
-			physp[i] = ((pfn + i) << PAGE_SHIFT) + got + pgorder;
+			physp[i] = ((pfn + i) << PAGE_SHIFT) +
+				got + is_io + pgorder;
 			got = 0;
 		}
 	}
@@ -257,6 +276,10 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 		put_page(page);
 	}
 	return err;
+
+ up_err:
+	up_read(&current->mm->mmap_sem);
+	return err;
 }
 
 /*
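For completeness, a hypothetical userspace sketch of the case this patch enables: mmap()ing a PCI BAR through sysfs (which yields a VM_PFNMAP mapping) and registering it as a KVM memory slot. This is not code from the patch; the PCI device path, slot number, BAR size and guest physical address are invented for illustration, and error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) { perror("/dev/kvm"); return 1; }
	int vmfd = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vmfd < 0) { perror("KVM_CREATE_VM"); return 1; }

	/* mmap() of a PCI resource file gives this process a VM_PFNMAP,
	 * cache-inhibited mapping of the device's registers. */
	int barfd = open("/sys/bus/pci/devices/0000:00:01.0/resource0", O_RDWR);
	size_t len = 0x10000;				/* example BAR size */
	void *bar = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, barfd, 0);
	if (barfd < 0 || bar == MAP_FAILED) { perror("map BAR"); return 1; }

	/* Register that mapping as a guest memory slot; with this patch the
	 * Book3S HV MMU code accepts the PFNMAP vma and records its cache
	 * bits instead of failing when get_user_pages_fast() cannot pin it. */
	struct kvm_userspace_memory_region region = {
		.slot            = 1,
		.guest_phys_addr = 0x200000000ULL,	/* example guest address */
		.memory_size     = len,
		.userspace_addr  = (unsigned long)bar,
	};
	if (ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
		perror("KVM_SET_USER_MEMORY_REGION");
	return 0;
}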