 arch/powerpc/include/asm/kvm_host.h |   9
 arch/powerpc/include/asm/kvm_ppc.h  |   5
 arch/powerpc/kvm/book3s_64_mmu_hv.c |   6
 arch/powerpc/kvm/book3s_hv.c        | 104
 arch/powerpc/kvm/book3s_hv_rm_mmu.c |   2
 arch/powerpc/kvm/book3s_pr.c        |  12
 arch/powerpc/kvm/booke.c            |  12
 arch/powerpc/kvm/powerpc.c          |  13
 8 files changed, 102 insertions(+), 61 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index f20a5ef1c7e8..68f5a308737a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -204,7 +204,7 @@ struct revmap_entry {
 };
 
 /*
- * We use the top bit of each memslot->rmap entry as a lock bit,
+ * We use the top bit of each memslot->arch.rmap entry as a lock bit,
  * and bit 32 as a present flag. The bottom 32 bits are the
  * index in the guest HPT of a HPTE that points to the page.
  */
@@ -215,14 +215,17 @@ struct revmap_entry {
 #define KVMPPC_RMAP_PRESENT	0x100000000ul
 #define KVMPPC_RMAP_INDEX	0xfffffffful
 
-/* Low-order bits in kvm->arch.slot_phys[][] */
+/* Low-order bits in memslot->arch.slot_phys[] */
 #define KVMPPC_PAGE_ORDER_MASK	0x1f
 #define KVMPPC_PAGE_NO_CACHE	HPTE_R_I	/* 0x20 */
 #define KVMPPC_PAGE_WRITETHRU	HPTE_R_W	/* 0x40 */
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
+#ifdef CONFIG_KVM_BOOK3S_64_HV
 	unsigned long *rmap;
+	unsigned long *slot_phys;
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
 };
 
 struct kvm_arch {
@@ -246,8 +249,6 @@ struct kvm_arch {
 	unsigned long hpt_npte;
 	unsigned long hpt_mask;
 	spinlock_t slot_phys_lock;
-	unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
-	int slot_npages[KVM_MEM_SLOTS_NUM];
 	unsigned short last_vcpu[NR_CPUS];
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	struct kvmppc_linear_info *hpt_li;
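
For reference, the per-memslot arch data that results from the kvm_host.h hunks above is a pair of per-slot arrays; the sketch below is assembled from the diff, with comments added here based on the KVMPPC_RMAP_* and KVMPPC_PAGE_* definitions:

    struct kvm_arch_memory_slot {
    #ifdef CONFIG_KVM_BOOK3S_64_HV
            /* one entry per guest page: lock bit, present flag, HPT index */
            unsigned long *rmap;
            /* one entry per guest page: host physical addr + KVMPPC_PAGE_* bits */
            unsigned long *slot_phys;
    #endif /* CONFIG_KVM_BOOK3S_64_HV */
    };
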
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index c06a64b53362..41a00eae68c7 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -143,7 +143,12 @@ extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
 extern void kvm_release_hpt(struct kvmppc_linear_info *li);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
+extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+				     struct kvm_memory_slot *dont);
+extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+				      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
 				struct kvm_userspace_memory_region *mem);
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem);
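
The two new kvmppc_core_*_memslot() hooks declared above are the per-core backends for the generic memslot create/free callbacks; the powerpc.c hunk at the end of this patch wires them up roughly as follows (a sketch of the delegation, not a verbatim copy of the file):

    int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
    {
            return kvmppc_core_create_memslot(slot, npages);   /* HV: allocates rmap */
    }

    void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                               struct kvm_memory_slot *dont)
    {
            kvmppc_core_free_memslot(free, dont);   /* HV: also unpins and frees slot_phys */
    }
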
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 0f031c07f7e5..a389cc62b16c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -261,7 +261,7 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 
 /*
  * This is called to get a reference to a guest page if there isn't
- * one already in the kvm->arch.slot_phys[][] arrays.
+ * one already in the memslot->arch.slot_phys[] array.
  */
 static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 				  struct kvm_memory_slot *memslot,
@@ -276,7 +276,7 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	struct vm_area_struct *vma;
 	unsigned long pfn, i, npages;
 
-	physp = kvm->arch.slot_phys[memslot->id];
+	physp = memslot->arch.slot_phys;
 	if (!physp)
 		return -EINVAL;
 	if (physp[gfn - memslot->base_gfn])
@@ -1065,7 +1065,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		goto err;
 	if (!kvm->arch.using_mmu_notifiers) {
-		physp = kvm->arch.slot_phys[memslot->id];
+		physp = memslot->arch.slot_phys;
 		if (!physp)
 			goto err;
 		physp += gfn - memslot->base_gfn;
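
Schematically, the lookup these hunks (and the book3s_hv_rm_mmu.c hunk below) converge on moves from a per-VM array indexed by slot id to an array carried by the memslot itself; a fragment for orientation, not self-contained code:

    /* before */
    physp = kvm->arch.slot_phys[memslot->id];

    /* after */
    physp = memslot->arch.slot_phys;
    if (physp)
            physp += gfn - memslot->base_gfn;   /* entry for this guest frame */
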
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 48b0d4a73b9d..817837de7362 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1314,48 +1314,67 @@ static unsigned long slb_pgsize_encoding(unsigned long psize)
 	return senc;
 }
 
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem)
+static void unpin_slot(struct kvm_memory_slot *memslot)
 {
-	unsigned long npages;
-	unsigned long *phys;
+	unsigned long *physp;
+	unsigned long j, npages, pfn;
+	struct page *page;
 
-	/* Allocate a slot_phys array */
-	phys = kvm->arch.slot_phys[mem->slot];
-	if (!kvm->arch.using_mmu_notifiers && !phys) {
-		npages = mem->memory_size >> PAGE_SHIFT;
-		phys = vzalloc(npages * sizeof(unsigned long));
-		if (!phys)
-			return -ENOMEM;
-		kvm->arch.slot_phys[mem->slot] = phys;
-		kvm->arch.slot_npages[mem->slot] = npages;
+	physp = memslot->arch.slot_phys;
+	npages = memslot->npages;
+	if (!physp)
+		return;
+	for (j = 0; j < npages; j++) {
+		if (!(physp[j] & KVMPPC_GOT_PAGE))
+			continue;
+		pfn = physp[j] >> PAGE_SHIFT;
+		page = pfn_to_page(pfn);
+		SetPageDirty(page);
+		put_page(page);
+	}
+}
+
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+	if (!dont || free->arch.rmap != dont->arch.rmap) {
+		vfree(free->arch.rmap);
+		free->arch.rmap = NULL;
 	}
+	if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
+		unpin_slot(free);
+		vfree(free->arch.slot_phys);
+		free->arch.slot_phys = NULL;
+	}
+}
+
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
+	if (!slot->arch.rmap)
+		return -ENOMEM;
+	slot->arch.slot_phys = NULL;
 
 	return 0;
 }
 
-static void unpin_slot(struct kvm *kvm, int slot_id)
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot,
+				      struct kvm_userspace_memory_region *mem)
 {
-	unsigned long *physp;
-	unsigned long j, npages, pfn;
-	struct page *page;
+	unsigned long *phys;
 
-	physp = kvm->arch.slot_phys[slot_id];
-	npages = kvm->arch.slot_npages[slot_id];
-	if (physp) {
-		spin_lock(&kvm->arch.slot_phys_lock);
-		for (j = 0; j < npages; j++) {
-			if (!(physp[j] & KVMPPC_GOT_PAGE))
-				continue;
-			pfn = physp[j] >> PAGE_SHIFT;
-			page = pfn_to_page(pfn);
-			SetPageDirty(page);
-			put_page(page);
-		}
-		kvm->arch.slot_phys[slot_id] = NULL;
-		spin_unlock(&kvm->arch.slot_phys_lock);
-		vfree(physp);
+	/* Allocate a slot_phys array if needed */
+	phys = memslot->arch.slot_phys;
+	if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
+		phys = vzalloc(memslot->npages * sizeof(unsigned long));
+		if (!phys)
+			return -ENOMEM;
+		memslot->arch.slot_phys = phys;
 	}
+
+	return 0;
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
@@ -1482,11 +1501,16 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		/* Initialize phys addrs of pages in RMO */
 		npages = ri->npages;
 		porder = __ilog2(npages);
-		physp = kvm->arch.slot_phys[memslot->id];
-		spin_lock(&kvm->arch.slot_phys_lock);
-		for (i = 0; i < npages; ++i)
-			physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
-		spin_unlock(&kvm->arch.slot_phys_lock);
+		physp = memslot->arch.slot_phys;
+		if (physp) {
+			if (npages > memslot->npages)
+				npages = memslot->npages;
+			spin_lock(&kvm->arch.slot_phys_lock);
+			for (i = 0; i < npages; ++i)
+				physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
+					porder;
+			spin_unlock(&kvm->arch.slot_phys_lock);
+		}
 	}
 
 	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
@@ -1547,12 +1571,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
-	unsigned long i;
-
-	if (!kvm->arch.using_mmu_notifiers)
-		for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
-			unpin_slot(kvm, i);
-
 	if (kvm->arch.rma) {
 		kvm_release_rma(kvm->arch.rma);
 		kvm->arch.rma = NULL;
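
Net effect of the book3s_hv.c changes, schematically: unpinning used to be driven once per VM from kvmppc_core_destroy_vm(), looping over every possible slot id; it is now driven per memslot when the slot itself is freed. A sketch of the two paths (the "old" lines are the ones removed above):

    /* old: VM teardown walked all slot ids */
    for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
            unpin_slot(kvm, i);

    /* new: generic memslot code frees each slot, which unpins its pages */
    kvmppc_core_free_memslot(memslot, NULL);   /* -> unpin_slot(memslot); vfree(slot_phys) */
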
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index fb0e821622d4..63eb94e63cc3 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -183,7 +183,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	rmap = &memslot->arch.rmap[slot_fn];
 
 	if (!kvm->arch.using_mmu_notifiers) {
-		physp = kvm->arch.slot_phys[memslot->id];
+		physp = memslot->arch.slot_phys;
 		if (!physp)
 			return H_PARAMETER;
 		physp += slot_fn;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index b3c584f94cb3..fdadc9e57da2 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1220,7 +1220,19 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
 }
 #endif /* CONFIG_PPC64 */
 
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+}
+
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	return 0;
+}
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot,
 				      struct kvm_userspace_memory_region *mem)
 {
 	return 0;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 5f0476a602d8..514405752988 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1438,7 +1438,19 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	return -ENOTSUPP;
 }
 
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+}
+
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	return 0;
+}
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot,
 				      struct kvm_userspace_memory_region *mem)
 {
 	return 0;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0ffd7d17adc7..33122dd89da9 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -389,19 +389,12 @@ long kvm_arch_dev_ioctl(struct file *filp,
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
-	if (!dont || free->arch.rmap != dont->arch.rmap) {
-		vfree(free->arch.rmap);
-		free->arch.rmap = NULL;
-	}
+	kvmppc_core_free_memslot(free, dont);
 }
 
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 {
-	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
-	if (!slot->arch.rmap)
-		return -ENOMEM;
-
-	return 0;
+	return kvmppc_core_create_memslot(slot, npages);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -410,7 +403,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   int user_alloc)
 {
-	return kvmppc_core_prepare_memory_region(kvm, mem);
+	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,