about summary refs log tree commit diff stats
path: root/arch/powerpc/kvm/book3s_hv.c
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2012-09-11 09:27:46 -0400
committerAlexander Graf <agraf@suse.de>2012-10-05 17:38:51 -0400
commita66b48c3a39fa1c4223d4f847fdc7a04ed1618de (patch)
tree2ee5d3e474001b19053b9f905ad4f4fd26c74551 /arch/powerpc/kvm/book3s_hv.c
parent2c9097e4c1340208ef93371abd4b3bd7e989381b (diff)
KVM: PPC: Move kvm->arch.slot_phys into memslot.arch
Now that we have an architecture-specific field in the kvm_memory_slot structure, we can use it to store the array of page physical addresses that we need for Book3S HV KVM on PPC970 processors. This reduces the size of struct kvm_arch for Book3S HV, and also reduces the size of struct kvm_arch_memory_slot for other PPC KVM variants since the fields in it are now only compiled in for Book3S HV. This necessitates making the kvm_arch_create_memslot and kvm_arch_free_memslot operations specific to each PPC KVM variant. That in turn means that we now don't allocate the rmap arrays on Book3S PR and Book E. Since we now unpin pages and free the slot_phys array in kvmppc_core_free_memslot, we no longer need to do it in kvmppc_core_destroy_vm, since the generic code takes care to free all the memslots when destroying a VM. We now need the new memslot to be passed in to kvmppc_core_prepare_memory_region, since we need to initialize its arch.slot_phys member on Book3S HV. Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 104
1 file changed, 61 insertions(+), 43 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 48b0d4a73b9d..817837de7362 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1314,48 +1314,67 @@ static unsigned long slb_pgsize_encoding(unsigned long psize)
1314 return senc; 1314 return senc;
1315} 1315}
1316 1316
1317int kvmppc_core_prepare_memory_region(struct kvm *kvm, 1317static void unpin_slot(struct kvm_memory_slot *memslot)
1318 struct kvm_userspace_memory_region *mem)
1319{ 1318{
1320 unsigned long npages; 1319 unsigned long *physp;
1321 unsigned long *phys; 1320 unsigned long j, npages, pfn;
1321 struct page *page;
1322 1322
1323 /* Allocate a slot_phys array */ 1323 physp = memslot->arch.slot_phys;
1324 phys = kvm->arch.slot_phys[mem->slot]; 1324 npages = memslot->npages;
1325 if (!kvm->arch.using_mmu_notifiers && !phys) { 1325 if (!physp)
1326 npages = mem->memory_size >> PAGE_SHIFT; 1326 return;
1327 phys = vzalloc(npages * sizeof(unsigned long)); 1327 for (j = 0; j < npages; j++) {
1328 if (!phys) 1328 if (!(physp[j] & KVMPPC_GOT_PAGE))
1329 return -ENOMEM; 1329 continue;
1330 kvm->arch.slot_phys[mem->slot] = phys; 1330 pfn = physp[j] >> PAGE_SHIFT;
1331 kvm->arch.slot_npages[mem->slot] = npages; 1331 page = pfn_to_page(pfn);
1332 SetPageDirty(page);
1333 put_page(page);
1334 }
1335}
1336
1337void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
1338 struct kvm_memory_slot *dont)
1339{
1340 if (!dont || free->arch.rmap != dont->arch.rmap) {
1341 vfree(free->arch.rmap);
1342 free->arch.rmap = NULL;
1332 } 1343 }
1344 if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
1345 unpin_slot(free);
1346 vfree(free->arch.slot_phys);
1347 free->arch.slot_phys = NULL;
1348 }
1349}
1350
1351int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
1352 unsigned long npages)
1353{
1354 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
1355 if (!slot->arch.rmap)
1356 return -ENOMEM;
1357 slot->arch.slot_phys = NULL;
1333 1358
1334 return 0; 1359 return 0;
1335} 1360}
1336 1361
1337static void unpin_slot(struct kvm *kvm, int slot_id) 1362int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1363 struct kvm_memory_slot *memslot,
1364 struct kvm_userspace_memory_region *mem)
1338{ 1365{
1339 unsigned long *physp; 1366 unsigned long *phys;
1340 unsigned long j, npages, pfn;
1341 struct page *page;
1342 1367
1343 physp = kvm->arch.slot_phys[slot_id]; 1368 /* Allocate a slot_phys array if needed */
1344 npages = kvm->arch.slot_npages[slot_id]; 1369 phys = memslot->arch.slot_phys;
1345 if (physp) { 1370 if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
1346 spin_lock(&kvm->arch.slot_phys_lock); 1371 phys = vzalloc(memslot->npages * sizeof(unsigned long));
1347 for (j = 0; j < npages; j++) { 1372 if (!phys)
1348 if (!(physp[j] & KVMPPC_GOT_PAGE)) 1373 return -ENOMEM;
1349 continue; 1374 memslot->arch.slot_phys = phys;
1350 pfn = physp[j] >> PAGE_SHIFT;
1351 page = pfn_to_page(pfn);
1352 SetPageDirty(page);
1353 put_page(page);
1354 }
1355 kvm->arch.slot_phys[slot_id] = NULL;
1356 spin_unlock(&kvm->arch.slot_phys_lock);
1357 vfree(physp);
1358 } 1375 }
1376
1377 return 0;
1359} 1378}
1360 1379
1361void kvmppc_core_commit_memory_region(struct kvm *kvm, 1380void kvmppc_core_commit_memory_region(struct kvm *kvm,
@@ -1482,11 +1501,16 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1482 /* Initialize phys addrs of pages in RMO */ 1501 /* Initialize phys addrs of pages in RMO */
1483 npages = ri->npages; 1502 npages = ri->npages;
1484 porder = __ilog2(npages); 1503 porder = __ilog2(npages);
1485 physp = kvm->arch.slot_phys[memslot->id]; 1504 physp = memslot->arch.slot_phys;
1486 spin_lock(&kvm->arch.slot_phys_lock); 1505 if (physp) {
1487 for (i = 0; i < npages; ++i) 1506 if (npages > memslot->npages)
1488 physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder; 1507 npages = memslot->npages;
1489 spin_unlock(&kvm->arch.slot_phys_lock); 1508 spin_lock(&kvm->arch.slot_phys_lock);
1509 for (i = 0; i < npages; ++i)
1510 physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
1511 porder;
1512 spin_unlock(&kvm->arch.slot_phys_lock);
1513 }
1490 } 1514 }
1491 1515
1492 /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */ 1516 /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
@@ -1547,12 +1571,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1547 1571
1548void kvmppc_core_destroy_vm(struct kvm *kvm) 1572void kvmppc_core_destroy_vm(struct kvm *kvm)
1549{ 1573{
1550 unsigned long i;
1551
1552 if (!kvm->arch.using_mmu_notifiers)
1553 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
1554 unpin_slot(kvm, i);
1555
1556 if (kvm->arch.rma) { 1574 if (kvm->arch.rma) {
1557 kvm_release_rma(kvm->arch.rma); 1575 kvm_release_rma(kvm->arch.rma);
1558 kvm->arch.rma = NULL; 1576 kvm->arch.rma = NULL;