diff options
| -rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu.c | 1 | ||||
| -rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_hv.c | 90 | ||||
| -rw-r--r-- | arch/powerpc/kvm/book3s_pr.c | 2 |
3 files changed, 64 insertions, 29 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 29ebe2fd5867..a93d719edc90 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c | |||
| @@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
| 235 | gpte->may_read = true; | 235 | gpte->may_read = true; |
| 236 | gpte->may_write = true; | 236 | gpte->may_write = true; |
| 237 | gpte->page_size = MMU_PAGE_4K; | 237 | gpte->page_size = MMU_PAGE_4K; |
| 238 | gpte->wimg = HPTE_R_M; | ||
| 238 | 239 | ||
| 239 | return 0; | 240 | return 0; |
| 240 | } | 241 | } |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 966097232d21..b73dbc9e797d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
| @@ -65,11 +65,17 @@ struct kvm_resize_hpt { | |||
| 65 | u32 order; | 65 | u32 order; |
| 66 | 66 | ||
| 67 | /* These fields protected by kvm->lock */ | 67 | /* These fields protected by kvm->lock */ |
| 68 | |||
| 69 | /* Possible values and their usage: | ||
| 70 | * <0 an error occurred during allocation, | ||
| 71 | * -EBUSY allocation is in progress, ||
| 72 | * 0 allocation made successfully. ||
| 73 | */ | ||
| 68 | int error; | 74 | int error; |
| 69 | bool prepare_done; | ||
| 70 | 75 | ||
| 71 | /* Private to the work thread, until prepare_done is true, | 76 | /* Private to the work thread, until error != -EBUSY, |
| 72 | * then protected by kvm->resize_hpt_sem */ | 77 | * then protected by kvm->lock. |
| 78 | */ | ||
| 73 | struct kvm_hpt_info hpt; | 79 | struct kvm_hpt_info hpt; |
| 74 | }; | 80 | }; |
| 75 | 81 | ||
| @@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) | |||
| 159 | * Reset all the reverse-mapping chains for all memslots | 165 | * Reset all the reverse-mapping chains for all memslots |
| 160 | */ | 166 | */ |
| 161 | kvmppc_rmap_reset(kvm); | 167 | kvmppc_rmap_reset(kvm); |
| 162 | /* Ensure that each vcpu will flush its TLB on next entry. */ | ||
| 163 | cpumask_setall(&kvm->arch.need_tlb_flush); | ||
| 164 | err = 0; | 168 | err = 0; |
| 165 | goto out; | 169 | goto out; |
| 166 | } | 170 | } |
| @@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) | |||
| 176 | kvmppc_set_hpt(kvm, &info); | 180 | kvmppc_set_hpt(kvm, &info); |
| 177 | 181 | ||
| 178 | out: | 182 | out: |
| 183 | if (err == 0) | ||
| 184 | /* Ensure that each vcpu will flush its TLB on next entry. */ | ||
| 185 | cpumask_setall(&kvm->arch.need_tlb_flush); | ||
| 186 | |||
| 179 | mutex_unlock(&kvm->lock); | 187 | mutex_unlock(&kvm->lock); |
| 180 | return err; | 188 | return err; |
| 181 | } | 189 | } |
| @@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize) | |||
| 1413 | 1421 | ||
| 1414 | static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) | 1422 | static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) |
| 1415 | { | 1423 | { |
| 1416 | BUG_ON(kvm->arch.resize_hpt != resize); | 1424 | if (WARN_ON(!mutex_is_locked(&kvm->lock))) |
| 1425 | return; | ||
| 1417 | 1426 | ||
| 1418 | if (!resize) | 1427 | if (!resize) |
| 1419 | return; | 1428 | return; |
| 1420 | 1429 | ||
| 1421 | if (resize->hpt.virt) | 1430 | if (resize->error != -EBUSY) { |
| 1422 | kvmppc_free_hpt(&resize->hpt); | 1431 | if (resize->hpt.virt) |
| 1432 | kvmppc_free_hpt(&resize->hpt); | ||
| 1433 | kfree(resize); | ||
| 1434 | } | ||
| 1423 | 1435 | ||
| 1424 | kvm->arch.resize_hpt = NULL; | 1436 | if (kvm->arch.resize_hpt == resize) |
| 1425 | kfree(resize); | 1437 | kvm->arch.resize_hpt = NULL; |
| 1426 | } | 1438 | } |
| 1427 | 1439 | ||
| 1428 | static void resize_hpt_prepare_work(struct work_struct *work) | 1440 | static void resize_hpt_prepare_work(struct work_struct *work) |
| @@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work) | |||
| 1431 | struct kvm_resize_hpt, | 1443 | struct kvm_resize_hpt, |
| 1432 | work); | 1444 | work); |
| 1433 | struct kvm *kvm = resize->kvm; | 1445 | struct kvm *kvm = resize->kvm; |
| 1434 | int err; | 1446 | int err = 0; |
| 1435 | 1447 | ||
| 1436 | resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", | 1448 | if (WARN_ON(resize->error != -EBUSY)) |
| 1437 | resize->order); | 1449 | return; |
| 1438 | |||
| 1439 | err = resize_hpt_allocate(resize); | ||
| 1440 | 1450 | ||
| 1441 | mutex_lock(&kvm->lock); | 1451 | mutex_lock(&kvm->lock); |
| 1442 | 1452 | ||
| 1453 | /* Request is still current? */ | ||
| 1454 | if (kvm->arch.resize_hpt == resize) { | ||
| 1455 | /* We may request large allocations here: | ||
| 1456 | * do not sleep with kvm->lock held for a while. | ||
| 1457 | */ | ||
| 1458 | mutex_unlock(&kvm->lock); | ||
| 1459 | |||
| 1460 | resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", | ||
| 1461 | resize->order); | ||
| 1462 | |||
| 1463 | err = resize_hpt_allocate(resize); | ||
| 1464 | |||
| 1465 | /* We have a strict assumption about -EBUSY ||
| 1466 | * when preparing for HPT resize. | ||
| 1467 | */ | ||
| 1468 | if (WARN_ON(err == -EBUSY)) | ||
| 1469 | err = -EINPROGRESS; | ||
| 1470 | |||
| 1471 | mutex_lock(&kvm->lock); | ||
| 1472 | /* It is possible that kvm->arch.resize_hpt != resize | ||
| 1473 | * after we grab kvm->lock again. | ||
| 1474 | */ | ||
| 1475 | } | ||
| 1476 | |||
| 1443 | resize->error = err; | 1477 | resize->error = err; |
| 1444 | resize->prepare_done = true; | 1478 | |
| 1479 | if (kvm->arch.resize_hpt != resize) | ||
| 1480 | resize_hpt_release(kvm, resize); | ||
| 1445 | 1481 | ||
| 1446 | mutex_unlock(&kvm->lock); | 1482 | mutex_unlock(&kvm->lock); |
| 1447 | } | 1483 | } |
| @@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, | |||
| 1466 | 1502 | ||
| 1467 | if (resize) { | 1503 | if (resize) { |
| 1468 | if (resize->order == shift) { | 1504 | if (resize->order == shift) { |
| 1469 | /* Suitable resize in progress */ | 1505 | /* Suitable resize in progress? */ |
| 1470 | if (resize->prepare_done) { | 1506 | ret = resize->error; |
| 1471 | ret = resize->error; | 1507 | if (ret == -EBUSY) |
| 1472 | if (ret != 0) | ||
| 1473 | resize_hpt_release(kvm, resize); | ||
| 1474 | } else { | ||
| 1475 | ret = 100; /* estimated time in ms */ | 1508 | ret = 100; /* estimated time in ms */ |
| 1476 | } | 1509 | else if (ret) |
| 1510 | resize_hpt_release(kvm, resize); | ||
| 1477 | 1511 | ||
| 1478 | goto out; | 1512 | goto out; |
| 1479 | } | 1513 | } |
| @@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, | |||
| 1493 | ret = -ENOMEM; | 1527 | ret = -ENOMEM; |
| 1494 | goto out; | 1528 | goto out; |
| 1495 | } | 1529 | } |
| 1530 | |||
| 1531 | resize->error = -EBUSY; | ||
| 1496 | resize->order = shift; | 1532 | resize->order = shift; |
| 1497 | resize->kvm = kvm; | 1533 | resize->kvm = kvm; |
| 1498 | INIT_WORK(&resize->work, resize_hpt_prepare_work); | 1534 | INIT_WORK(&resize->work, resize_hpt_prepare_work); |
| @@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, | |||
| 1547 | if (!resize || (resize->order != shift)) | 1583 | if (!resize || (resize->order != shift)) |
| 1548 | goto out; | 1584 | goto out; |
| 1549 | 1585 | ||
| 1550 | ret = -EBUSY; | ||
| 1551 | if (!resize->prepare_done) | ||
| 1552 | goto out; | ||
| 1553 | |||
| 1554 | ret = resize->error; | 1586 | ret = resize->error; |
| 1555 | if (ret != 0) | 1587 | if (ret) |
| 1556 | goto out; | 1588 | goto out; |
| 1557 | 1589 | ||
| 1558 | ret = resize_hpt_rehash(resize); | 1590 | ret = resize_hpt_rehash(resize); |
| 1559 | if (ret != 0) | 1591 | if (ret) |
| 1560 | goto out; | 1592 | goto out; |
| 1561 | 1593 | ||
| 1562 | resize_hpt_pivot(resize); | 1594 | resize_hpt_pivot(resize); |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d0dc8624198f..7deaeeb14b93 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
| @@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); | |||
| 60 | #define MSR_USER32 MSR_USER | 60 | #define MSR_USER32 MSR_USER |
| 61 | #define MSR_USER64 MSR_USER | 61 | #define MSR_USER64 MSR_USER |
| 62 | #define HW_PAGE_SIZE PAGE_SIZE | 62 | #define HW_PAGE_SIZE PAGE_SIZE |
| 63 | #define HPTE_R_M _PAGE_COHERENT | ||
| 63 | #endif | 64 | #endif |
| 64 | 65 | ||
| 65 | static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) | 66 | static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) |
| @@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 557 | pte.eaddr = eaddr; | 558 | pte.eaddr = eaddr; |
| 558 | pte.vpage = eaddr >> 12; | 559 | pte.vpage = eaddr >> 12; |
| 559 | pte.page_size = MMU_PAGE_64K; | 560 | pte.page_size = MMU_PAGE_64K; |
| 561 | pte.wimg = HPTE_R_M; | ||
| 560 | } | 562 | } |
| 561 | 563 | ||
| 562 | switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { | 564 | switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { |
