author      Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>   2015-03-20 05:39:42 -0400
committer   Alexander Graf <agraf@suse.de>                       2015-04-21 09:21:29 -0400
commit      31037ecad275e9ad9bc671c34f72b495cf708ca3 (patch)
tree        211cc1252b34b398e7cab789546828ddb55f05e2 /arch
parent      e928e9cb3601ce240189bfea05b67ebd391c85ae (diff)
KVM: PPC: Book3S HV: Remove RMA-related variables from code
Real-mode areas (RMAs) are no longer supported now that PPC970 support has
been removed, so drop the remaining RMA-related fields from the code. Also
rename rma_setup_done to hpte_setup_done to better reflect what the flag now
tracks.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
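
For context, the flag being renamed takes part in a small ordering handshake that the hunks below touch: the reset path clears hpte_setup_done, issues smp_mb(), and then checks vcpus_running, while the vcpu entry path increments vcpus_running, issues smp_mb(), and then checks hpte_setup_done; completed setup is published behind smp_wmb(). The following is a minimal userspace sketch of that pattern, assuming a standalone C11 program with illustrative function names; it is not the kernel implementation, and C11 atomics/fences stand in for the kernel's atomic_t, smp_mb() and smp_wmb().

/*
 * Simplified sketch (assumption for illustration, not kernel code) of the
 * handshake between hpte_setup_done and vcpus_running.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int hpte_setup_done;      /* set once the HPT/VRMA are set up */
static atomic_int vcpus_running;        /* vcpus currently in the run loop */

/* Reset path: clear the flag, then make sure no vcpu is running. */
static bool try_reset_hpt(void)
{
        atomic_store_explicit(&hpte_setup_done, 0, memory_order_relaxed);
        /* order hpte_setup_done vs. vcpus_running (smp_mb() in the kernel) */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&vcpus_running, memory_order_relaxed)) {
                /* a vcpu raced with us; restore the flag and back off */
                atomic_store_explicit(&hpte_setup_done, 1, memory_order_relaxed);
                return false;
        }
        /* ... safe to reallocate/reset the hashed page table here ... */
        return true;
}

/* Vcpu entry path: advertise ourselves, then re-check the flag. */
static void vcpu_run(void)
{
        atomic_fetch_add_explicit(&vcpus_running, 1, memory_order_relaxed);
        /* order vcpus_running vs. hpte_setup_done (smp_mb() in the kernel) */
        atomic_thread_fence(memory_order_seq_cst);
        if (!atomic_load_explicit(&hpte_setup_done, memory_order_relaxed)) {
                /* first vcpu here: do the HTAB/VRMA setup work ... */
                /* ... then publish completion behind a write barrier */
                atomic_thread_fence(memory_order_release); /* smp_wmb() analogue */
                atomic_store_explicit(&hpte_setup_done, 1, memory_order_relaxed);
        }
        /* ... enter the guest ... */
        atomic_fetch_sub_explicit(&vcpus_running, 1, memory_order_relaxed);
}

int main(void)
{
        vcpu_run();
        printf("reset %s\n", try_reset_hpt() ? "succeeded" : "refused");
        return 0;
}

With full barriers on both sides, either the resetter sees the vcpu's increment and backs off, or the vcpu sees the cleared flag and takes the (mutex-protected, in the kernel) setup path; they cannot both miss each other's write.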
Diffstat (limited to 'arch')
-rw-r--r--   arch/powerpc/include/asm/kvm_host.h    3
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu_hv.c   28
-rw-r--r--   arch/powerpc/kvm/book3s_hv.c          10
3 files changed, 20 insertions, 21 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8ef05121d3cd..015773f5bb33 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -228,9 +228,8 @@ struct kvm_arch {
         int tlbie_lock;
         unsigned long lpcr;
         unsigned long rmor;
-        struct kvm_rma_info *rma;
         unsigned long vrma_slb_v;
-        int rma_setup_done;
+        int hpte_setup_done;
         u32 hpt_order;
         atomic_t vcpus_running;
         u32 online_vcores;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 534acb3c6c3d..dbf127168ca4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -116,12 +116,12 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
         long order;
 
         mutex_lock(&kvm->lock);
-        if (kvm->arch.rma_setup_done) {
-                kvm->arch.rma_setup_done = 0;
-                /* order rma_setup_done vs. vcpus_running */
+        if (kvm->arch.hpte_setup_done) {
+                kvm->arch.hpte_setup_done = 0;
+                /* order hpte_setup_done vs. vcpus_running */
                 smp_mb();
                 if (atomic_read(&kvm->arch.vcpus_running)) {
-                        kvm->arch.rma_setup_done = 1;
+                        kvm->arch.hpte_setup_done = 1;
                         goto out;
                 }
         }
@@ -1339,20 +1339,20 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
         unsigned long tmp[2];
         ssize_t nb;
         long int err, ret;
-        int rma_setup;
+        int hpte_setup;
 
         if (!access_ok(VERIFY_READ, buf, count))
                 return -EFAULT;
 
         /* lock out vcpus from running while we're doing this */
         mutex_lock(&kvm->lock);
-        rma_setup = kvm->arch.rma_setup_done;
-        if (rma_setup) {
-                kvm->arch.rma_setup_done = 0;        /* temporarily */
-                /* order rma_setup_done vs. vcpus_running */
+        hpte_setup = kvm->arch.hpte_setup_done;
+        if (hpte_setup) {
+                kvm->arch.hpte_setup_done = 0;        /* temporarily */
+                /* order hpte_setup_done vs. vcpus_running */
                 smp_mb();
                 if (atomic_read(&kvm->arch.vcpus_running)) {
-                        kvm->arch.rma_setup_done = 1;
+                        kvm->arch.hpte_setup_done = 1;
                         mutex_unlock(&kvm->lock);
                         return -EBUSY;
                 }
@@ -1405,7 +1405,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                        "r=%lx\n", ret, i, v, r);
                                 goto out;
                         }
-                        if (!rma_setup && is_vrma_hpte(v)) {
+                        if (!hpte_setup && is_vrma_hpte(v)) {
                                 unsigned long psize = hpte_base_page_size(v, r);
                                 unsigned long senc = slb_pgsize_encoding(psize);
                                 unsigned long lpcr;
@@ -1414,7 +1414,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                         (VRMA_VSID << SLB_VSID_SHIFT_1T);
                                 lpcr = senc << (LPCR_VRMASD_SH - 4);
                                 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
-                                rma_setup = 1;
+                                hpte_setup = 1;
                         }
                         ++i;
                         hptp += 2;
@@ -1430,9 +1430,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
         }
 
  out:
-        /* Order HPTE updates vs. rma_setup_done */
+        /* Order HPTE updates vs. hpte_setup_done */
         smp_wmb();
-        kvm->arch.rma_setup_done = rma_setup;
+        kvm->arch.hpte_setup_done = hpte_setup;
         mutex_unlock(&kvm->lock);
 
         if (err)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index b9c11a3abcb2..dde14fd64d8e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2044,11 +2044,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
         }
 
         atomic_inc(&vcpu->kvm->arch.vcpus_running);
-        /* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
+        /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
         smp_mb();
 
         /* On the first time here, set up HTAB and VRMA */
-        if (!vcpu->kvm->arch.rma_setup_done) {
+        if (!vcpu->kvm->arch.hpte_setup_done) {
                 r = kvmppc_hv_setup_htab_rma(vcpu);
                 if (r)
                         goto out;
@@ -2250,7 +2250,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
         int srcu_idx;
 
         mutex_lock(&kvm->lock);
-        if (kvm->arch.rma_setup_done)
+        if (kvm->arch.hpte_setup_done)
                 goto out;        /* another vcpu beat us to it */
 
         /* Allocate hashed page table (if not done already) and reset it */
@@ -2301,9 +2301,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 
         kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
 
-        /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
+        /* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
         smp_wmb();
-        kvm->arch.rma_setup_done = 1;
+        kvm->arch.hpte_setup_done = 1;
         err = 0;
  out_srcu:
         srcu_read_unlock(&kvm->srcu, srcu_idx);