author	Paul Mackerras <paulus@samba.org>	2015-03-27 23:21:11 -0400
committer	Alexander Graf <agraf@suse.de>	2015-04-21 09:21:34 -0400
commit	eddb60fb1443f85c5728f1b1cd4be608c6832a79 (patch)
tree	a4b4cb322d3e067ba4e53825152ff45efaba9022 /arch
parent	6af27c847ad1b889c29a641dfc41f2d78c46a048 (diff)
KVM: PPC: Book3S HV: Translate kvmhv_commence_exit to C

This replaces the assembler code for kvmhv_commence_exit() with C code
in book3s_hv_builtin.c.  It also moves the IPI sending code that was in
book3s_hv_rm_xics.c into a new kvmhv_rm_send_ipi() function so it can
be used by kvmhv_commence_exit() as well as icp_rm_set_vcpu_irq().

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
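The heart of the translation is replacing the hand-rolled lwarx/stwcx.
reservation loop (deleted from book3s_hv_rmhandlers.S below) with a
cmpxchg() retry loop.  As a reading aid, here is a minimal stand-alone
sketch of that atomic read-then-OR pattern, written with C11 atomics
rather than the kernel's cmpxchg(); the helper name is illustrative and
not part of the patch:

#include <stdatomic.h>

/*
 * Atomically OR `mask` into *map and return the value seen before
 * the update, the same contract as the cmpxchg() loop in the new
 * kvmhv_commence_exit() below.
 */
static unsigned int atomic_or_return_old(_Atomic unsigned int *map,
					 unsigned int mask)
{
	unsigned int old = atomic_load(map);

	/* On failure, `old` is refreshed with the current value. */
	while (!atomic_compare_exchange_weak(map, &old, old | mask))
		;
	return old;
}

Returning the pre-update value is what lets kvmhv_commence_exit()
decide whether it is the first thread to commence the exit.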
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s_64.h	 2
-rw-r--r--	arch/powerpc/kvm/book3s_hv_builtin.c	63
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rm_xics.c	12
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	66
4 files changed, 75 insertions(+), 68 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 869c53fe02cd..2b84e485a181 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -438,6 +438,8 @@ static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
 
 extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
 
+extern void kvmhv_rm_send_ipi(int cpu);
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 275425142bb7..c42aa55b885f 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -22,6 +22,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/archrandom.h>
+#include <asm/xics.h>
 
 #define KVM_CMA_CHUNK_ORDER	18
 
@@ -184,3 +185,65 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu)
 
 	return H_HARDWARE;
 }
+
+static inline void rm_writeb(unsigned long paddr, u8 val)
+{
+	__asm__ __volatile__("stbcix %0,0,%1"
+		: : "r" (val), "r" (paddr) : "memory");
+}
+
+/*
+ * Send an interrupt to another CPU.
+ * This can only be called in real mode.
+ * The caller needs to include any barrier needed to order writes
+ * to memory vs. the IPI/message.
+ */
+void kvmhv_rm_send_ipi(int cpu)
+{
+	unsigned long xics_phys;
+
+	/* Poke the target */
+	xics_phys = paca[cpu].kvm_hstate.xics_phys;
+	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+}
+
+/*
+ * The following functions are called from the assembly code
+ * in book3s_hv_rmhandlers.S.
+ */
+static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
+{
+	int cpu = vc->pcpu;
+
+	/* Order setting of exit map vs. msgsnd/IPI */
+	smp_mb();
+	for (; active; active >>= 1, ++cpu)
+		if (active & 1)
+			kvmhv_rm_send_ipi(cpu);
+}
+
+void kvmhv_commence_exit(int trap)
+{
+	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
+	int ptid = local_paca->kvm_hstate.ptid;
+	int me, ee;
+
+	/* Set our bit in the threads-exiting-guest map in the 0xff00
+	   bits of vcore->entry_exit_map */
+	me = 0x100 << ptid;
+	do {
+		ee = vc->entry_exit_map;
+	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);
+
+	/* Are we the first here? */
+	if ((ee >> 8) != 0)
+		return;
+
+	/*
+	 * Trigger the other threads in this vcore to exit the guest.
+	 * If this is a hypervisor decrementer interrupt then they
+	 * will be already on their way out of the guest.
+	 */
+	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
+		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));
+}
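A note on the new C code above: vcore->entry_exit_map packs two 8-bit
fields, which is why kvmhv_commence_exit() shifts its own bit by 0x100
and tests (ee >> 8).  The helpers below are purely illustrative (they
are not in the patch) and merely spell out the convention implied by
the comments:

/*
 * Illustrative decode of the packed vcore->entry_exit_map, based on
 * the comments in this patch:
 *   bits 0-7:  threads that have entered the guest
 *   bits 8-15: threads that have commenced exiting the guest
 */
static inline int threads_in_guest(int map)
{
	return map & 0xff;
}

static inline int threads_exiting(int map)
{
	return (map >> 8) & 0xff;
}

With that layout, (ee >> 8) != 0 means another thread has already
commenced the exit and sent any needed IPIs, and ee & ~(1 << ptid) is
the set of threads still in the guest other than ourselves.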
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 6dded8c75234..00e45b6d4f24 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -26,12 +26,6 @@
 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 			    u32 new_irq);
 
-static inline void rm_writeb(unsigned long paddr, u8 val)
-{
-	__asm__ __volatile__("sync; stbcix %0,0,%1"
-		: : "r" (val), "r" (paddr) : "memory");
-}
-
 /* -- ICS routines -- */
 static void ics_rm_check_resend(struct kvmppc_xics *xics,
 				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
@@ -60,7 +54,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
 				struct kvm_vcpu *this_vcpu)
 {
 	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
-	unsigned long xics_phys;
 	int cpu;
 
 	/* Mark the target VCPU as having an interrupt pending */
@@ -83,9 +76,8 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
 	/* In SMT cpu will always point to thread 0, we adjust it */
 	cpu += vcpu->arch.ptid;
 
-	/* Not too hard, then poke the target */
-	xics_phys = paca[cpu].kvm_hstate.xics_phys;
-	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+	smp_mb();
+	kvmhv_rm_send_ipi(cpu);
 }
 
 static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
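One subtlety in this hunk: the rm_writeb() deleted from this file
embedded a sync in its asm string, whereas the shared copy now in
book3s_hv_builtin.c does not, so the ordering duty moves to the caller,
hence the explicit smp_mb() above.  A minimal sketch of the resulting
publish-then-poke pattern; notify_cpu() and struct work_slot are
illustrative stand-ins, not code from the patch:

struct work_slot {
	int pending;			/* flag the target will check */
};

static void notify_cpu(struct work_slot *slot, int cpu)
{
	slot->pending = 1;		/* publish the work first */
	smp_mb();			/* order the store vs. the IPI */
	kvmhv_rm_send_ipi(cpu);		/* then poke the target */
}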
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 3f6fd78cccd2..fcf3a617cc8a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -264,7 +264,11 @@ kvm_novcpu_exit:
 	addi	r3, r4, VCPU_TB_RMEXIT
 	bl	kvmhv_accumulate_time
 #endif
-13:	bl	kvmhv_commence_exit
+13:	mr	r3, r12
+	stw	r12, 112-4(r1)
+	bl	kvmhv_commence_exit
+	nop
+	lwz	r12, 112-4(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -1161,6 +1165,9 @@ mc_cont:
 
 	/* Increment exit count, poke other threads to exit */
 	bl	kvmhv_commence_exit
+	nop
+	ld	r9, HSTATE_KVM_VCPU(r13)
+	lwz	r12, VCPU_TRAP(r9)
 
 	/* Save guest CTRL register, set runlatch to 1 */
 	mfspr	r6,SPRN_CTRLF
@@ -1614,63 +1621,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mtlr	r0
 	blr
 
-kvmhv_commence_exit:		/* r12 = trap, r13 = paca, doesn't trash r9 */
-	mflr	r0
-	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -PPC_MIN_STKFRM(r1)
-
-	/* Set our bit in the threads-exiting-guest map in the 0xff00
-	   bits of vcore->entry_exit_map */
-	ld	r5, HSTATE_KVM_VCORE(r13)
-	lbz	r4, HSTATE_PTID(r13)
-	li	r7, 0x100
-	sld	r7, r7, r4
-	addi	r6, r5, VCORE_ENTRY_EXIT
-41:	lwarx	r3, 0, r6
-	or	r0, r3, r7
-	stwcx.	r0, 0, r6
-	bne	41b
-	isync		/* order stwcx. vs. reading napping_threads */
-
-	/*
-	 * At this point we have an interrupt that we have to pass
-	 * up to the kernel or qemu; we can't handle it in real mode.
-	 * Thus we have to do a partition switch, so we have to
-	 * collect the other threads, if we are the first thread
-	 * to take an interrupt.  To do this, we send a message or
-	 * IPI to all the threads that have their bit set in the entry
-	 * map in vcore->entry_exit_map (other than ourselves).
-	 * However, we don't need to bother if this is an HDEC
-	 * interrupt, since the other threads will already be on their
-	 * way here in that case.
-	 */
-	cmpwi	r3,0x100	/* Are we the first here? */
-	bge	43f
-	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
-	beq	43f
-
-	srwi	r0,r7,8
-	andc.	r3,r3,r0	/* no sense IPI'ing ourselves */
-	beq	43f
-	/* Order entry/exit update vs. IPIs */
-	sync
-	mulli	r4,r4,PACA_SIZE	/* get paca for thread 0 */
-	subf	r6,r4,r13
-42:	andi.	r0,r3,1
-	beq	44f
-	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
-	li	r0,IPI_PRIORITY
-	li	r7,XICS_MFRR
-	stbcix	r0,r7,r8		/* trigger the IPI */
-44:	srdi.	r3,r3,1
-	addi	r6,r6,PACA_SIZE
-	bne	42b
-
-43:	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
-	addi	r1, r1, PPC_MIN_STKFRM
-	mtlr	r0
-	blr
-
 /*
  * Check whether an HDSI is an HPTE not found fault or something else.
  * If it is an HPTE not found fault that is due to the guest accessing
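The call-site changes in this file follow from switching to a C callee:
per the PPC64 ELF ABI the first argument travels in r3 (hence the
"mr r3, r12"), and the callee may clobber the volatile registers, which
is why the trap number in r12 is saved across the call and r9 is
reloaded from HSTATE_KVM_VCPU afterwards.  The prototype the assembly
now calls, as added in book3s_hv_builtin.c above:

/* Trap number is passed in r3 per the PPC64 ELF ABI. */
void kvmhv_commence_exit(int trap);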