Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--   arch/powerpc/kvm/book3s_hv.c   170
1 file changed, 169 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 60b7300568c8..af862c30b70e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -124,6 +124,158 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 	       vcpu->arch.last_inst);
 }
 
+struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+{
+	int r;
+	struct kvm_vcpu *v, *ret = NULL;
+
+	mutex_lock(&kvm->lock);
+	kvm_for_each_vcpu(r, v, kvm) {
+		if (v->vcpu_id == id) {
+			ret = v;
+			break;
+		}
+	}
+	mutex_unlock(&kvm->lock);
+	return ret;
+}
+
+static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
+{
+	vpa->shared_proc = 1;
+	vpa->yield_count = 1;
+}
+
+static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
+				       unsigned long flags,
+				       unsigned long vcpuid, unsigned long vpa)
+{
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long pg_index, ra, len;
+	unsigned long pg_offset;
+	void *va;
+	struct kvm_vcpu *tvcpu;
+
+	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
+	if (!tvcpu)
+		return H_PARAMETER;
+
+	flags >>= 63 - 18;
+	flags &= 7;
+	if (flags == 0 || flags == 4)
+		return H_PARAMETER;
+	if (flags < 4) {
+		if (vpa & 0x7f)
+			return H_PARAMETER;
+		/* registering new area; convert logical addr to real */
+		pg_index = vpa >> kvm->arch.ram_porder;
+		pg_offset = vpa & (kvm->arch.ram_psize - 1);
+		if (pg_index >= kvm->arch.ram_npages)
+			return H_PARAMETER;
+		if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
+			return H_PARAMETER;
+		ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
+		ra |= pg_offset;
+		va = __va(ra);
+		if (flags <= 1)
+			len = *(unsigned short *)(va + 4);
+		else
+			len = *(unsigned int *)(va + 4);
+		if (pg_offset + len > kvm->arch.ram_psize)
+			return H_PARAMETER;
+		switch (flags) {
+		case 1:		/* register VPA */
+			if (len < 640)
+				return H_PARAMETER;
+			tvcpu->arch.vpa = va;
+			init_vpa(vcpu, va);
+			break;
+		case 2:		/* register DTL */
+			if (len < 48)
+				return H_PARAMETER;
+			if (!tvcpu->arch.vpa)
+				return H_RESOURCE;
+			len -= len % 48;
+			tvcpu->arch.dtl = va;
+			tvcpu->arch.dtl_end = va + len;
+			break;
+		case 3:		/* register SLB shadow buffer */
+			if (len < 8)
+				return H_PARAMETER;
+			if (!tvcpu->arch.vpa)
+				return H_RESOURCE;
+			tvcpu->arch.slb_shadow = va;
+			len = (len - 16) / 16;
+			tvcpu->arch.slb_shadow = va;
+			break;
+		}
+	} else {
+		switch (flags) {
+		case 5:		/* unregister VPA */
+			if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
+				return H_RESOURCE;
+			tvcpu->arch.vpa = NULL;
+			break;
+		case 6:		/* unregister DTL */
+			tvcpu->arch.dtl = NULL;
+			break;
+		case 7:		/* unregister SLB shadow buffer */
+			tvcpu->arch.slb_shadow = NULL;
+			break;
+		}
+	}
+	return H_SUCCESS;
+}
+
+int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
+{
+	unsigned long req = kvmppc_get_gpr(vcpu, 3);
+	unsigned long target, ret = H_SUCCESS;
+	struct kvm_vcpu *tvcpu;
+
+	switch (req) {
+	case H_CEDE:
+		vcpu->arch.shregs.msr |= MSR_EE;
+		vcpu->arch.ceded = 1;
+		smp_mb();
+		if (!vcpu->arch.prodded)
+			kvmppc_vcpu_block(vcpu);
+		else
+			vcpu->arch.prodded = 0;
+		smp_mb();
+		vcpu->arch.ceded = 0;
+		break;
+	case H_PROD:
+		target = kvmppc_get_gpr(vcpu, 4);
+		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+		if (!tvcpu) {
+			ret = H_PARAMETER;
+			break;
+		}
+		tvcpu->arch.prodded = 1;
+		smp_mb();
+		if (vcpu->arch.ceded) {
+			if (waitqueue_active(&vcpu->wq)) {
+				wake_up_interruptible(&vcpu->wq);
+				vcpu->stat.halt_wakeup++;
+			}
+		}
+		break;
+	case H_CONFER:
+		break;
+	case H_REGISTER_VPA:
+		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
+					kvmppc_get_gpr(vcpu, 5),
+					kvmppc_get_gpr(vcpu, 6));
+		break;
+	default:
+		return RESUME_HOST;
+	}
+	kvmppc_set_gpr(vcpu, 3, ret);
+	vcpu->arch.hcall_needed = 0;
+	return RESUME_GUEST;
+}
+
 static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			      struct task_struct *tsk)
 {
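
Reading aid for the hunk above: do_h_register_vpa() decodes a three-bit sub-function code from bits 16-18 of the flags argument (IBM MSB-first numbering), which is why it shifts right by 63 - 18 and masks with 7. The constant and helper names in the following sketch are invented for readability and are not part of this patch:

	/* Illustrative rewrite of "flags >>= 63 - 18; flags &= 7;" -- not in the patch. */
	#define VPA_SUBFUNC_SHIFT	(63 - 18)	/* low-order bit of the field */
	#define VPA_SUBFUNC_MASK	7UL		/* three-bit sub-function code */

	static inline unsigned long vpa_subfunc(unsigned long flags)
	{
		/* 1 = register VPA, 2 = register DTL, 3 = register SLB shadow buffer,
		 * 5/6/7 = the matching unregister calls; 0 and 4 are rejected. */
		return (flags >> VPA_SUBFUNC_SHIFT) & VPA_SUBFUNC_MASK;
	}
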
@@ -318,7 +470,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 
 extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 
-int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_run_vcpu(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	u64 now;
 
@@ -370,6 +522,22 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	return -EBUSY;
 }
 
+int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	int r;
+
+	do {
+		r = kvmppc_run_vcpu(run, vcpu);
+
+		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
+		    !(vcpu->arch.shregs.msr & MSR_PR)) {
+			r = kvmppc_pseries_do_hcall(vcpu);
+			kvmppc_core_deliver_interrupts(vcpu);
+		}
+	} while (r == RESUME_GUEST);
+	return r;
+}
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				      struct kvm_userspace_memory_region *mem)
 {
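
With this change, kvmppc_vcpu_run() loops re-entering the guest: a PAPR hypercall made from the guest kernel (MSR_PR clear) is handled in kvmppc_pseries_do_hcall(), and only calls it does not recognize (RESUME_HOST) reach userspace as KVM_EXIT_PAPR_HCALL. The handler reads the hcall number from GPR 3 and arguments from GPRs 4-6 and writes the status back to GPR 3, following the usual PAPR calling convention. As a rough guest-side sketch only (assuming the standard pseries plpar_hcall_norets() wrapper; the function names here are hypothetical and not taken from this patch):

	/* Sketch of how a pseries guest might issue the hcalls handled above. */
	static long example_register_vpa(unsigned long cpu, unsigned long vpa_addr)
	{
		/* sub-function 1 (register VPA); vpa_addr is a guest logical
		 * address and must be 128-byte aligned (the vpa & 0x7f check). */
		unsigned long flags = 1UL << (63 - 18);

		return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa_addr);
	}

	static long example_cede(void)
	{
		/* yields the virtual processor until it is prodded or interrupted */
		return plpar_hcall_norets(H_CEDE);
	}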