author    Paul Mackerras <paulus@samba.org>    2012-10-14 21:16:48 -0400
committer Alexander Graf <agraf@suse.de>       2012-10-30 05:54:55 -0400
commit    913d3ff9a3c3a13c3115eb4b3265aa35a9e0a7ad (patch)
tree      93e66d12f6b475919ecc567dce9fc81313138d98
parent    7b444c6710c6c4994e31eb19216ce055836e65c4 (diff)
KVM: PPC: Book3S HV: Don't access runnable threads list without vcore lock
There were a few places where we were traversing the list of runnable threads in a virtual core, i.e. vc->runnable_threads, without holding the vcore spinlock. This extends the places where we hold the vcore spinlock to cover everywhere that we traverse that list.

Since we possibly need to sleep inside kvmppc_book3s_hv_page_fault, this moves the call of it from kvmppc_handle_exit out to kvmppc_vcpu_run, where we don't hold the vcore lock.

In kvmppc_vcore_blocked, we don't actually need to check whether all vcpus are ceded and don't have any pending exceptions, since the caller has already done that. The caller (kvmppc_run_vcpu) wasn't actually checking for pending exceptions, so we add that. The change of if to while in kvmppc_run_vcpu is to make sure that we never call kvmppc_remove_runnable() when the vcore state is RUNNING or EXITING.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h |  1
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c       | 67
2 files changed, 34 insertions(+), 34 deletions(-)
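The heart of the patch is deferring work that may sleep out of the exit handler: kvmppc_handle_exit() only records the fault and returns RESUME_PAGE_FAULT, and kvmppc_vcpu_run(), which runs without the vcore lock, does the actual fault handling under srcu_read_lock(). Below is a minimal user-space sketch of that pattern, not the kernel code itself; the demo_* names and the main() driver are made up for illustration.

/*
 * Illustrative sketch of the deferred page-fault pattern used by this patch.
 * The demo_* names are hypothetical; the real code lives in
 * kvmppc_handle_exit() and kvmppc_vcpu_run().
 */
#include <stdio.h>

#define RESUME_GUEST		0
#define RESUME_FLAG_ARCH1	(1 << 2)
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

struct demo_vcpu {
	unsigned long fault_dar;	/* faulting address, saved at exit time */
	unsigned long fault_dsisr;	/* fault status, saved at exit time */
};

/* Exit handler: may run with the vcore lock held, so it must not sleep.
 * It only records the fault and tells the caller to handle it later. */
static int demo_handle_exit(struct demo_vcpu *vcpu, unsigned long dar,
			    unsigned long dsisr)
{
	vcpu->fault_dar = dar;
	vcpu->fault_dsisr = dsisr;
	return RESUME_PAGE_FAULT;
}

/* Stand-in for kvmppc_book3s_hv_page_fault(), which may sleep in the kernel. */
static int demo_page_fault(struct demo_vcpu *vcpu)
{
	printf("handling fault at 0x%lx (dsisr 0x%lx)\n",
	       vcpu->fault_dar, vcpu->fault_dsisr);
	return RESUME_GUEST;
}

int main(void)
{
	struct demo_vcpu vcpu = { 0, 0 };
	int r;

	/* Run loop: executes without the vcore lock, so sleeping is safe here. */
	r = demo_handle_exit(&vcpu, 0x1000, 0x40000000);
	if (r == RESUME_PAGE_FAULT)
		r = demo_page_fault(&vcpu);
	return r == RESUME_GUEST ? 0 : 1;
}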
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 76fdcfef0889..aabcdba8f6b0 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -118,6 +118,7 @@
 
 #define RESUME_FLAG_NV		(1<<0)	/* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */
+#define RESUME_FLAG_ARCH1	(1<<2)
 
 #define RESUME_GUEST		0
 #define RESUME_GUEST_NV		RESUME_FLAG_NV
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 77dec0f8a030..3a737a4bb8bf 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -57,6 +57,9 @@
 /* #define EXIT_DEBUG_SIMPLE */
 /* #define EXIT_DEBUG_INT */
 
+/* Used to indicate that a guest page fault needs to be handled */
+#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
+
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
@@ -431,7 +434,6 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				 struct task_struct *tsk)
 {
 	int r = RESUME_HOST;
-	int srcu_idx;
 
 	vcpu->stat.sum_exits++;
 
@@ -491,16 +493,12 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * have been handled already.
 	 */
 	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
-		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = kvmppc_book3s_hv_page_fault(run, vcpu,
-				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
-		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+		r = RESUME_PAGE_FAULT;
 		break;
 	case BOOK3S_INTERRUPT_H_INST_STORAGE:
-		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = kvmppc_book3s_hv_page_fault(run, vcpu,
-				kvmppc_get_pc(vcpu), 0);
-		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
+		vcpu->arch.fault_dsisr = 0;
+		r = RESUME_PAGE_FAULT;
 		break;
 	/*
 	 * This occurs if the guest executes an illegal instruction.
@@ -984,22 +982,24 @@ static int on_primary_thread(void)
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
  */
-static int kvmppc_run_core(struct kvmppc_vcore *vc)
+static void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
 	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
 	long ret;
 	u64 now;
 	int ptid, i, need_vpa_update;
 	int srcu_idx;
+	struct kvm_vcpu *vcpus_to_update[threads_per_core];
 
 	/* don't start if any threads have a signal pending */
 	need_vpa_update = 0;
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		if (signal_pending(vcpu->arch.run_task))
-			return 0;
-		need_vpa_update |= vcpu->arch.vpa.update_pending |
-			vcpu->arch.slb_shadow.update_pending |
-			vcpu->arch.dtl.update_pending;
+			return;
+		if (vcpu->arch.vpa.update_pending ||
+		    vcpu->arch.slb_shadow.update_pending ||
+		    vcpu->arch.dtl.update_pending)
+			vcpus_to_update[need_vpa_update++] = vcpu;
 	}
 
 	/*
@@ -1019,8 +1019,8 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	if (need_vpa_update) {
 		spin_unlock(&vc->lock);
-		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
-			kvmppc_update_vpas(vcpu);
+		for (i = 0; i < need_vpa_update; ++i)
+			kvmppc_update_vpas(vcpus_to_update[i]);
 		spin_lock(&vc->lock);
 	}
 
@@ -1037,8 +1037,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 			vcpu->arch.ptid = ptid++;
 		}
 	}
-	if (!vcpu0)
-		return 0;		/* nothing to run */
+	if (!vcpu0) {
+		vc->vcore_state = VCORE_INACTIVE;
+		return;		/* nothing to run; should never happen */
+	}
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
 		if (vcpu->arch.ceded)
 			vcpu->arch.ptid = ptid++;
@@ -1091,6 +1093,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	preempt_enable();
 	kvm_resched(vcpu);
 
+	spin_lock(&vc->lock);
 	now = get_tb();
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		/* cancel pending dec exception if dec is positive */
@@ -1114,7 +1117,6 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 		}
 	}
 
-	spin_lock(&vc->lock);
  out:
 	vc->vcore_state = VCORE_INACTIVE;
 	vc->preempt_tb = mftb();
@@ -1125,8 +1127,6 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 			wake_up(&vcpu->arch.cpu_run);
 		}
 	}
-
-	return 1;
 }
 
 /*
@@ -1150,20 +1150,11 @@ static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
 	DEFINE_WAIT(wait);
-	struct kvm_vcpu *v;
-	int all_idle = 1;
 
 	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
 	vc->vcore_state = VCORE_SLEEPING;
 	spin_unlock(&vc->lock);
-	list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
-		if (!v->arch.ceded || v->arch.pending_exceptions) {
-			all_idle = 0;
-			break;
-		}
-	}
-	if (all_idle)
-		schedule();
+	schedule();
 	finish_wait(&vc->wq, &wait);
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
@@ -1219,7 +1210,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		vc->runner = vcpu;
 		n_ceded = 0;
 		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
-			n_ceded += v->arch.ceded;
+			if (!v->arch.pending_exceptions)
+				n_ceded += v->arch.ceded;
 		if (n_ceded == vc->n_runnable)
 			kvmppc_vcore_blocked(vc);
 		else
@@ -1240,8 +1232,9 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	}
 
 	if (signal_pending(current)) {
-		if (vc->vcore_state == VCORE_RUNNING ||
-		    vc->vcore_state == VCORE_EXITING) {
+		while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
+		       (vc->vcore_state == VCORE_RUNNING ||
+			vc->vcore_state == VCORE_EXITING)) {
 			spin_unlock(&vc->lock);
 			kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
 			spin_lock(&vc->lock);
@@ -1261,6 +1254,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
+	int srcu_idx;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -1299,6 +1293,11 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		    !(vcpu->arch.shregs.msr & MSR_PR)) {
 			r = kvmppc_pseries_do_hcall(vcpu);
 			kvmppc_core_prepare_to_enter(vcpu);
+		} else if (r == RESUME_PAGE_FAULT) {
+			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+			r = kvmppc_book3s_hv_page_fault(run, vcpu,
+				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
 		}
 	} while (r == RESUME_GUEST);
 