author    Benjamin Herrenschmidt <benh@kernel.crashing.org>    2013-04-17 16:31:15 -0400
committer Alexander Graf <agraf@suse.de>                       2013-04-26 14:27:32 -0400
commit    e7d26f285b4be9466c9e393139e1c9cffe4cedfc (patch)
tree      0afc30678671f87be82992d306cdec3b984bc6dd
parent    54695c3088a74e25474db8eb6b490b45d1aeb0ca (diff)
KVM: PPC: Book3S HV: Add support for real mode ICP in XICS emulation
This adds an implementation of the XICS hypercalls in real mode for HV KVM, which allows us to avoid exiting the guest MMU context on all threads for a variety of operations such as fetching a pending interrupt, EOI of messages, IPIs, etc.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r--  arch/powerpc/kvm/Makefile                 5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_xics.c    406
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  18
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c           64
-rw-r--r--  arch/powerpc/kvm/book3s_xics.h           16
5 files changed, 490 insertions, 19 deletions
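
The core technique in this patch: the entire ICP (interrupt presentation controller) state fits in one 64-bit word, so every state transition in the real-mode code below is a lockless snapshot/modify/cmpxchg loop (icp_rm_try_update()) rather than a lock that real mode could not safely contend on. Below is a minimal standalone sketch of that pattern, modelled on the Accept (H_XIRR) transition; the field layout and names are illustrative assumptions, not the kernel's actual union kvmppc_icp_state.

    #include <stdatomic.h>
    #include <stdint.h>

    /* Illustrative stand-in for union kvmppc_icp_state: all fields in 64 bits */
    union icp_state {
        uint64_t raw;
        struct {
            uint8_t  cppr;        /* current processor priority */
            uint8_t  mfrr;        /* IPI request priority */
            uint8_t  pending_pri; /* priority of the pending interrupt */
            uint8_t  out_ee;      /* computed external-interrupt output */
            uint32_t xisr;        /* pending source, low 24 bits used */
        };
    };

    /* Accept: return XISR | CPPR<<24, make the pending priority current */
    static uint32_t icp_accept(_Atomic uint64_t *state)
    {
        union icp_state old, new;
        uint32_t xirr;

        do {
            old.raw = new.raw = atomic_load(state);
            xirr = old.xisr | ((uint32_t)old.cppr << 24);
            if (!old.xisr)
                break;
            new.cppr = new.pending_pri;
            new.pending_pri = 0xff;  /* 0xff = least favored */
            new.xisr = 0;
            /* recompute the interrupt output, as icp_rm_try_update() does */
            new.out_ee = (new.xisr && new.pending_pri < new.cppr);
        } while (!atomic_compare_exchange_weak(state, &old.raw, new.raw));

        return xirr;
    }
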
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index f9b87b540450..422de3f4d46c 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -72,12 +72,15 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
 	book3s_hv.o \
 	book3s_hv_interrupts.o \
 	book3s_64_mmu_hv.o
+kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
+	book3s_hv_rm_xics.o
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
 	book3s_hv_rmhandlers.o \
 	book3s_hv_rm_mmu.o \
 	book3s_64_vio_hv.o \
 	book3s_hv_ras.o \
-	book3s_hv_builtin.o
+	book3s_hv_builtin.o \
+	$(kvm-book3s_64-builtin-xics-objs-y)
 
 kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
 	book3s_xics.o
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
new file mode 100644
index 000000000000..b4b0082f761c
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright 2012 Michael Ellerman, IBM Corporation.
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/debug.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+
+#include "book3s_xics.h"
+
+#define DEBUG_PASSUP
+
+static inline void rm_writeb(unsigned long paddr, u8 val)
+{
+	__asm__ __volatile__("sync; stbcix %0,0,%1"
+		: : "r" (val), "r" (paddr) : "memory");
+}
+
+static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
+				struct kvm_vcpu *this_vcpu)
+{
+	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
+	unsigned long xics_phys;
+	int cpu;
+
+	/* Mark the target VCPU as having an interrupt pending */
+	vcpu->stat.queue_intr++;
+	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
+
+	/* Kick self ? Just set MER and return */
+	if (vcpu == this_vcpu) {
+		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
+		return;
+	}
+
+	/* Check if the core is loaded, if not, too hard */
+	cpu = vcpu->cpu;
+	if (cpu < 0 || cpu >= nr_cpu_ids) {
+		this_icp->rm_action |= XICS_RM_KICK_VCPU;
+		this_icp->rm_kick_target = vcpu;
+		return;
+	}
+	/* In SMT cpu will always point to thread 0, we adjust it */
+	cpu += vcpu->arch.ptid;
+
+	/* Not too hard, then poke the target */
+	xics_phys = paca[cpu].kvm_hstate.xics_phys;
+	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+}
+
+static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
+{
+	/* Note: Only called on self ! */
+	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
+		  &vcpu->arch.pending_exceptions);
+	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
+}
+
+static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
+				     union kvmppc_icp_state old,
+				     union kvmppc_icp_state new)
+{
+	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
+	bool success;
+
+	/* Calculate new output value */
+	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));
+
+	/* Attempt atomic update */
+	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
+	if (!success)
+		goto bail;
+
+	/*
+	 * Check for output state update
+	 *
+	 * Note that this is racy since another processor could be updating
+	 * the state already. This is why we never clear the interrupt output
+	 * here, we only ever set it. The clear only happens prior to doing
+	 * an update and only by the processor itself. Currently we do it
+	 * in Accept (H_XIRR) and Up_Cppr (H_XPPR).
+	 *
+	 * We also do not try to figure out whether the EE state has changed,
+	 * we unconditionally set it if the new state calls for it. The reason
+	 * for that is that we opportunistically remove the pending interrupt
+	 * flag when raising CPPR, so we need to set it back here if an
+	 * interrupt is still pending.
+	 */
+	if (new.out_ee)
+		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);
+
+	/* Expose the state change for debug purposes */
+	this_vcpu->arch.icp->rm_dbgstate = new;
+	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;
+
+ bail:
+	return success;
+}
+
+static inline int check_too_hard(struct kvmppc_xics *xics,
+				 struct kvmppc_icp *icp)
+{
+	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
+}
+
+static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+			     u8 new_cppr)
+{
+	union kvmppc_icp_state old_state, new_state;
+	bool resend;
+
+	/*
+	 * This handles several related states in one operation:
+	 *
+	 * ICP State: Down_CPPR
+	 *
+	 * Load CPPR with new value and if the XISR is 0
+	 * then check for resends:
+	 *
+	 * ICP State: Resend
+	 *
+	 * If MFRR is more favored than CPPR, check for IPIs
+	 * and notify ICS of a potential resend. This is done
+	 * asynchronously (when used in real mode, we will have
+	 * to exit here).
+	 *
+	 * We do not handle the complete Check_IPI as documented
+	 * here. In the PAPR, this state will be used for both
+	 * Set_MFRR and Down_CPPR. However, we know that we aren't
+	 * changing the MFRR state here so we don't need to handle
+	 * the case of an MFRR causing a reject of a pending irq,
+	 * this will have been handled when the MFRR was set in the
+	 * first place.
+	 *
+	 * Thus we don't have to handle rejects, only resends.
+	 *
+	 * When implementing real mode for HV KVM, resend will lead to
+	 * a H_TOO_HARD return and the whole transaction will be handled
+	 * in virtual mode.
+	 */
+	do {
+		old_state = new_state = ACCESS_ONCE(icp->state);
+
+		/* Down_CPPR */
+		new_state.cppr = new_cppr;
+
+		/*
+		 * Cut down Resend / Check_IPI / IPI
+		 *
+		 * The logic is that we cannot have a pending interrupt
+		 * trumped by an IPI at this point (see above), so we
+		 * know that either the pending interrupt is already an
+		 * IPI (in which case we don't care to override it) or
+		 * it's either more favored than us or non existent
+		 */
+		if (new_state.mfrr < new_cppr &&
+		    new_state.mfrr <= new_state.pending_pri) {
+			new_state.pending_pri = new_state.mfrr;
+			new_state.xisr = XICS_IPI;
+		}
+
+		/* Latch/clear resend bit */
+		resend = new_state.need_resend;
+		new_state.need_resend = 0;
+
+	} while (!icp_rm_try_update(icp, old_state, new_state));
+
+	/*
+	 * Now handle resend checks. Those are asynchronous to the ICP
+	 * state update in HW (ie bus transactions) so we can handle them
+	 * separately here as well.
+	 */
+	if (resend)
+		icp->rm_action |= XICS_RM_CHECK_RESEND;
+}
+
+
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+{
+	union kvmppc_icp_state old_state, new_state;
+	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+	struct kvmppc_icp *icp = vcpu->arch.icp;
+	u32 xirr;
+
+	if (!xics || !xics->real_mode)
+		return H_TOO_HARD;
+
+	/* First clear the interrupt */
+	icp_rm_clr_vcpu_irq(icp->vcpu);
+
+	/*
+	 * ICP State: Accept_Interrupt
+	 *
+	 * Return the pending interrupt (if any) along with the
+	 * current CPPR, then clear the XISR & set CPPR to the
+	 * pending priority
+	 */
+	do {
+		old_state = new_state = ACCESS_ONCE(icp->state);
+
+		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
+		if (!old_state.xisr)
+			break;
+		new_state.cppr = new_state.pending_pri;
+		new_state.pending_pri = 0xff;
+		new_state.xisr = 0;
+
+	} while (!icp_rm_try_update(icp, old_state, new_state));
+
+	/* Return the result in GPR4 */
+	vcpu->arch.gpr[4] = xirr;
+
+	return check_too_hard(xics, icp);
+}
+
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+		    unsigned long mfrr)
+{
+	union kvmppc_icp_state old_state, new_state;
+	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
+	u32 reject;
+	bool resend;
+	bool local;
+
+	if (!xics || !xics->real_mode)
+		return H_TOO_HARD;
+
+	local = this_icp->server_num == server;
+	if (local)
+		icp = this_icp;
+	else
+		icp = kvmppc_xics_find_server(vcpu->kvm, server);
+	if (!icp)
+		return H_PARAMETER;
+
+	/*
+	 * ICP state: Set_MFRR
+	 *
+	 * If the CPPR is more favored than the new MFRR, then
+	 * nothing needs to be done as there can be no XISR to
+	 * reject.
+	 *
+	 * If the CPPR is less favored, then we might be replacing
+	 * an interrupt, and thus need to possibly reject it as in
+	 *
+	 * ICP state: Check_IPI
+	 */
+	do {
+		old_state = new_state = ACCESS_ONCE(icp->state);
+
+		/* Set_MFRR */
+		new_state.mfrr = mfrr;
+
+		/* Check_IPI */
+		reject = 0;
+		resend = false;
+		if (mfrr < new_state.cppr) {
+			/* Reject a pending interrupt if not an IPI */
+			if (mfrr <= new_state.pending_pri)
+				reject = new_state.xisr;
+			new_state.pending_pri = mfrr;
+			new_state.xisr = XICS_IPI;
+		}
+
+		if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+			resend = new_state.need_resend;
+			new_state.need_resend = 0;
+		}
+	} while (!icp_rm_try_update(icp, old_state, new_state));
+
+	/* Pass rejects to virtual mode */
+	if (reject && reject != XICS_IPI) {
+		this_icp->rm_action |= XICS_RM_REJECT;
+		this_icp->rm_reject = reject;
+	}
+
+	/* Pass resends to virtual mode */
+	if (resend)
+		this_icp->rm_action |= XICS_RM_CHECK_RESEND;
+
+	return check_too_hard(xics, this_icp);
+}
+
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+	union kvmppc_icp_state old_state, new_state;
+	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+	struct kvmppc_icp *icp = vcpu->arch.icp;
+	u32 reject;
+
+	if (!xics || !xics->real_mode)
+		return H_TOO_HARD;
+
+	/*
+	 * ICP State: Set_CPPR
+	 *
+	 * We can safely compare the new value with the current
+	 * value outside of the transaction as the CPPR is only
+	 * ever changed by the processor on itself
+	 */
+	if (cppr > icp->state.cppr) {
+		icp_rm_down_cppr(xics, icp, cppr);
+		goto bail;
+	} else if (cppr == icp->state.cppr)
+		return H_SUCCESS;
+
+	/*
+	 * ICP State: Up_CPPR
+	 *
+	 * The processor is raising its priority, this can result
+	 * in a rejection of a pending interrupt:
+	 *
+	 * ICP State: Reject_Current
+	 *
+	 * We can remove EE from the current processor, the update
+	 * transaction will set it again if needed
+	 */
+	icp_rm_clr_vcpu_irq(icp->vcpu);
+
+	do {
+		old_state = new_state = ACCESS_ONCE(icp->state);
+
+		reject = 0;
+		new_state.cppr = cppr;
+
+		if (cppr <= new_state.pending_pri) {
+			reject = new_state.xisr;
+			new_state.xisr = 0;
+			new_state.pending_pri = 0xff;
+		}
+
+	} while (!icp_rm_try_update(icp, old_state, new_state));
+
+	/* Pass rejects to virtual mode */
+	if (reject && reject != XICS_IPI) {
+		icp->rm_action |= XICS_RM_REJECT;
+		icp->rm_reject = reject;
+	}
+ bail:
+	return check_too_hard(xics, icp);
+}
+
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+	struct kvmppc_icp *icp = vcpu->arch.icp;
+	struct kvmppc_ics *ics;
+	struct ics_irq_state *state;
+	u32 irq = xirr & 0x00ffffff;
+	u16 src;
+
+	if (!xics || !xics->real_mode)
+		return H_TOO_HARD;
+
+	/*
+	 * ICP State: EOI
+	 *
+	 * Note: If EOI is incorrectly used by SW to lower the CPPR
+	 * value (ie more favored), we do not check for rejection of
+	 * a pending interrupt, this is a SW error and PAPR specifies
+	 * that we don't have to deal with it.
+	 *
+	 * The sending of an EOI to the ICS is handled after the
+	 * CPPR update
+	 *
+	 * ICP State: Down_CPPR which we handle
+	 * in a separate function as it's shared with H_CPPR.
+	 */
+	icp_rm_down_cppr(xics, icp, xirr >> 24);
+
+	/* IPIs have no EOI */
+	if (irq == XICS_IPI)
+		goto bail;
+	/*
+	 * EOI handling: If the interrupt is still asserted, we need to
+	 * resend it. We can take a lockless "peek" at the ICS state here.
+	 *
+	 * "Message" interrupts will never have "asserted" set
+	 */
+	ics = kvmppc_xics_find_ics(xics, irq, &src);
+	if (!ics)
+		goto bail;
+	state = &ics->irq_state[src];
+
+	/* Still asserted, resend it, we make it look like a reject */
+	if (state->asserted) {
+		icp->rm_action |= XICS_RM_REJECT;
+		icp->rm_reject = irq;
+	}
+ bail:
+	return check_too_hard(xics, icp);
+}
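
A note on the XIRR word the code above traffics in: kvmppc_rm_h_xirr() returns, in GPR4, the 24-bit source number of the accepted interrupt with the previous CPPR in the top byte, and the guest hands that same word back to H_EOI, which is why kvmppc_rm_h_eoi() extracts the irq with xirr & 0x00ffffff and restores the CPPR from xirr >> 24. A hedged pair of helpers, invented here for illustration and not kernel API:

    #include <stdint.h>

    /* Pack/unpack the XIRR word exchanged by H_XIRR and H_EOI (illustrative) */
    static inline uint32_t xirr_pack(uint8_t cppr, uint32_t xisr)
    {
        return ((uint32_t)cppr << 24) | (xisr & 0x00ffffff);
    }

    static inline uint8_t xirr_cppr(uint32_t xirr)
    {
        return xirr >> 24;
    }

    static inline uint32_t xirr_source(uint32_t xirr)
    {
        return xirr & 0x00ffffff;
    }
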
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 56f8927b0ddf..fd3b72d5dfe6 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1424,11 +1424,19 @@ hcall_real_table:
 	.long 0		/* 0x58 */
 	.long 0		/* 0x5c */
 	.long 0		/* 0x60 */
-	.long 0		/* 0x64 */
-	.long 0		/* 0x68 */
-	.long 0		/* 0x6c */
-	.long 0		/* 0x70 */
-	.long 0		/* 0x74 */
+#ifdef CONFIG_KVM_XICS
+	.long .kvmppc_rm_h_eoi - hcall_real_table
+	.long .kvmppc_rm_h_cppr - hcall_real_table
+	.long .kvmppc_rm_h_ipi - hcall_real_table
+	.long 0		/* 0x70 - H_IPOLL */
+	.long .kvmppc_rm_h_xirr - hcall_real_table
+#else
+	.long 0		/* 0x64 - H_EOI */
+	.long 0		/* 0x68 - H_CPPR */
+	.long 0		/* 0x6c - H_IPI */
+	.long 0		/* 0x70 - H_IPOLL */
+	.long 0		/* 0x74 - H_XIRR */
+#endif
 	.long 0		/* 0x78 */
 	.long 0		/* 0x7c */
 	.long 0		/* 0x80 */
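
For context on the table above: hcall_real_table holds one 32-bit entry per hypercall number (hcall numbers advance in steps of 4, so H_EOI = 0x64 occupies slot 0x64/4 and H_XIRR = 0x74 slot 0x74/4), each entry being the handler's offset from the table base, with 0 meaning "no real-mode handler, fall back to virtual mode" (note H_IPOLL at 0x70 is deliberately left at 0 in both branches). A hypothetical C rendering of the assembly dispatch, with invented names and an assumed H_TOO_HARD value:

    #include <stddef.h>

    #define H_TOO_HARD 9999         /* KVM-internal: retry the hcall in virtual mode */

    struct kvm_vcpu;                /* opaque for this sketch */
    typedef long (*rm_hcall_fn)(struct kvm_vcpu *vcpu);

    extern int hcall_real_table[];  /* 32-bit offsets from the table base */
    extern size_t hcall_real_table_slots;

    static long try_real_mode_hcall(unsigned long req, struct kvm_vcpu *vcpu)
    {
        unsigned long idx = req / 4;        /* hcall numbers step by 4 */
        rm_hcall_fn fn;

        if (idx >= hcall_real_table_slots || !hcall_real_table[idx])
            return H_TOO_HARD;              /* zero slot: punt to virtual mode */

        /* each entry stores "handler - hcall_real_table", so add the base back */
        fn = (rm_hcall_fn)((char *)hcall_real_table + hcall_real_table[idx]);
        return fn(vcpu);
    }
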
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 1417e65b6bbd..7fd247cbd0d1 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -30,6 +30,9 @@
 #define XICS_DBG(fmt...) trace_printk(fmt)
 #endif
 
+#define ENABLE_REALMODE	true
+#define DEBUG_REALMODE	false
+
 /*
  * LOCKING
  * =======
@@ -220,8 +223,10 @@ static inline bool icp_try_update(struct kvmppc_icp *icp,
 	 * in Accept (H_XIRR) and Up_Cppr (H_XPPR).
 	 *
 	 * We also do not try to figure out whether the EE state has changed,
-	 * we unconditionally set it if the new state calls for it for the
-	 * same reason.
+	 * we unconditionally set it if the new state calls for it. The reason
+	 * for that is that we opportunistically remove the pending interrupt
+	 * flag when raising CPPR, so we need to set it back here if an
+	 * interrupt is still pending.
 	 */
 	if (new.out_ee) {
 		kvmppc_book3s_queue_irqprio(icp->vcpu,
@@ -483,7 +488,7 @@ static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 		icp_check_resend(xics, icp);
 }
 
-static noinline unsigned long h_xirr(struct kvm_vcpu *vcpu)
+static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
 {
 	union kvmppc_icp_state old_state, new_state;
 	struct kvmppc_icp *icp = vcpu->arch.icp;
@@ -517,8 +522,8 @@ static noinline unsigned long h_xirr(struct kvm_vcpu *vcpu)
 	return xirr;
 }
 
-static noinline int h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
-			  unsigned long mfrr)
+static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+				 unsigned long mfrr)
 {
 	union kvmppc_icp_state old_state, new_state;
 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -586,7 +591,7 @@ static noinline int h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	return H_SUCCESS;
 }
 
-static noinline void h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
 	union kvmppc_icp_state old_state, new_state;
 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -643,7 +648,7 @@ static noinline void h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 	icp_deliver_irq(xics, icp, reject);
 }
 
-static noinline int h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 {
 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
 	struct kvmppc_icp *icp = vcpu->arch.icp;
@@ -693,29 +698,54 @@ static noinline int h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 	return H_SUCCESS;
 }
 
+static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
+{
+	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+	struct kvmppc_icp *icp = vcpu->arch.icp;
+
+	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
+		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);
+
+	if (icp->rm_action & XICS_RM_KICK_VCPU)
+		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
+	if (icp->rm_action & XICS_RM_CHECK_RESEND)
+		icp_check_resend(xics, icp);
+	if (icp->rm_action & XICS_RM_REJECT)
+		icp_deliver_irq(xics, icp, icp->rm_reject);
+
+	icp->rm_action = 0;
+
+	return H_SUCCESS;
+}
+
 int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
 {
+	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
 	unsigned long res;
 	int rc = H_SUCCESS;
 
 	/* Check if we have an ICP */
-	if (!vcpu->arch.icp || !vcpu->kvm->arch.xics)
+	if (!xics || !vcpu->arch.icp)
 		return H_HARDWARE;
 
+	/* Check for real mode returning too hard */
+	if (xics->real_mode)
+		return kvmppc_xics_rm_complete(vcpu, req);
+
 	switch (req) {
 	case H_XIRR:
-		res = h_xirr(vcpu);
+		res = kvmppc_h_xirr(vcpu);
 		kvmppc_set_gpr(vcpu, 4, res);
 		break;
 	case H_CPPR:
-		h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
+		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
 		break;
 	case H_EOI:
-		rc = h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
+		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
 		break;
 	case H_IPI:
-		rc = h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
-			   kvmppc_get_gpr(vcpu, 5));
+		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
+				  kvmppc_get_gpr(vcpu, 5));
 		break;
 	}
 
@@ -933,6 +963,14 @@ int kvm_xics_create(struct kvm *kvm, u32 type)
 
 	xics_debugfs_init(xics);
 
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+		/* Enable real mode support */
+		xics->real_mode = ENABLE_REALMODE;
+		xics->real_mode_dbg = DEBUG_REALMODE;
+	}
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
 	return 0;
 }
 
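
The flow worth keeping in mind when reading this hunk: when real-mode XICS is active, the real-mode handler has already performed the ICP state transition; H_TOO_HARD (via check_too_hard()) only means some side effect (a vcpu kick, a resend check, a reject) still needs virtual mode, which is why kvmppc_xics_hcall() goes straight to kvmppc_xics_rm_complete() instead of re-running the hcall. A self-contained sketch of that round trip; everything except the rm_action/rm_reject names and the flag values is invented scaffolding, and the H_TOO_HARD value is an assumption:

    #include <stdio.h>

    #define H_SUCCESS	0
    #define H_TOO_HARD	9999	/* KVM-internal: finish in virtual mode */

    #define XICS_RM_KICK_VCPU	0x1
    #define XICS_RM_CHECK_RESEND	0x2
    #define XICS_RM_REJECT	0x4

    struct icp_model {
        unsigned int rm_action;	/* work real mode could not finish */
        unsigned int rm_reject;	/* irq to re-deliver in virtual mode */
    };

    /* Real mode: fast path; anything it cannot do safely is only recorded. */
    static long rm_eoi(struct icp_model *icp, unsigned int still_asserted_irq)
    {
        if (still_asserted_irq) {
            icp->rm_action |= XICS_RM_REJECT;
            icp->rm_reject = still_asserted_irq;
        }
        return icp->rm_action ? H_TOO_HARD : H_SUCCESS;	/* as check_too_hard() */
    }

    /* Virtual mode: kvmppc_xics_hcall() lands here when real mode punted. */
    static long vm_complete(struct icp_model *icp)
    {
        if (icp->rm_action & XICS_RM_REJECT)
            printf("re-deliver irq %u\n", icp->rm_reject); /* icp_deliver_irq() */
        icp->rm_action = 0;
        return H_SUCCESS;
    }

    int main(void)
    {
        struct icp_model icp = { 0, 0 };

        if (rm_eoi(&icp, 0x1001) == H_TOO_HARD)	/* guest exits to the host */
            vm_complete(&icp);			/* host finishes in virtual mode */
        return 0;
    }
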
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index 58ee190de5e5..c816c5a49c90 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -64,6 +64,20 @@ struct kvmppc_icp {
 	unsigned long server_num;
 	union kvmppc_icp_state state;
 	unsigned long resend_map[ICP_RESEND_MAP_SIZE];
+
+	/* Real mode might find something too hard, here's the action
+	 * it might request from virtual mode
+	 */
+#define XICS_RM_KICK_VCPU	0x1
+#define XICS_RM_CHECK_RESEND	0x2
+#define XICS_RM_REJECT		0x4
+	u32 rm_action;
+	struct kvm_vcpu *rm_kick_target;
+	u32 rm_reject;
+
+	/* Debug stuff for real mode */
+	union kvmppc_icp_state rm_dbgstate;
+	struct kvm_vcpu *rm_dbgtgt;
 };
 
 struct kvmppc_ics {
@@ -76,6 +90,8 @@ struct kvmppc_xics {
 	struct kvm *kvm;
 	struct dentry *dentry;
 	u32 max_icsid;
+	bool real_mode;
+	bool real_mode_dbg;
 	struct kvmppc_ics *ics[KVMPPC_XICS_MAX_ICS_ID + 1];
 };
 