author		Paolo Bonzini <pbonzini@redhat.com>	2014-09-24 17:19:45 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-09-24 17:19:45 -0400
commit		00c027db0cc4b7387b258330482c6e5f5e836b18 (patch)
tree		8f15b2721826ff5be2d8edea81cb785c5e5602eb
parent		c24ae0dcd3e8695efa43e71704d1fc4bc7e29e9b (diff)
parent		8d0eff6385640a9e6eed0b0c09113794b2bb74e9 (diff)
Merge tag 'signed-kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm-next
Patch queue for ppc - 2014-09-24

New awesome things in this release:

  - E500: e6500 core support
  - E500: guest and remote debug support
  - Book3S: remote sw breakpoint support
  - Book3S: HV: Minor bugfixes

Alexander Graf (1):
      KVM: PPC: Pass enum to kvmppc_get_last_inst

Bharat Bhushan (8):
      KVM: PPC: BOOKE: allow debug interrupt at "debug level"
      KVM: PPC: BOOKE : Emulate rfdi instruction
      KVM: PPC: BOOKE: Allow guest to change MSR_DE
      KVM: PPC: BOOKE: Clear guest dbsr in userspace exit KVM_EXIT_DEBUG
      KVM: PPC: BOOKE: Guest and hardware visible debug registers are same
      KVM: PPC: BOOKE: Add one reg interface for DBSR
      KVM: PPC: BOOKE: Add one_reg documentation of SPRG9 and DBSR
      KVM: PPC: BOOKE: Emulate debug registers and exception

Madhavan Srinivasan (2):
      powerpc/kvm: support to handle sw breakpoint
      powerpc/kvm: common sw breakpoint instr across ppc

Michael Neuling (1):
      KVM: PPC: Book3S HV: Add register name when loading toc

Mihai Caraman (10):
      powerpc/booke: Restrict SPE exception handlers to e200/e500 cores
      powerpc/booke: Revert SPE/AltiVec common defines for interrupt numbers
      KVM: PPC: Book3E: Increase FPU laziness
      KVM: PPC: Book3e: Add AltiVec support
      KVM: PPC: Make ONE_REG powerpc generic
      KVM: PPC: Move ONE_REG AltiVec support to powerpc
      KVM: PPC: Remove the tasklet used by the hrtimer
      KVM: PPC: Remove shared defines for SPE and AltiVec interrupts
      KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core
      KVM: PPC: Book3E: Enable e6500 core

Paul Mackerras (2):
      KVM: PPC: Book3S HV: Increase timeout for grabbing secondary threads
      KVM: PPC: Book3S HV: Only accept host PVR value for guest PVR
Diffstat:
-rw-r--r--	Documentation/virtual/kvm/api.txt		|   2
-rw-r--r--	arch/powerpc/include/asm/kvm_asm.h		|  20
-rw-r--r--	arch/powerpc/include/asm/kvm_booke.h		|   7
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h		|   4
-rw-r--r--	arch/powerpc/include/asm/kvm_ppc.h		|  11
-rw-r--r--	arch/powerpc/include/asm/reg_booke.h		|   2
-rw-r--r--	arch/powerpc/include/uapi/asm/kvm.h		|   6
-rw-r--r--	arch/powerpc/kernel/cpu_setup_fsl_booke.S	|  12
-rw-r--r--	arch/powerpc/kernel/cputable.c			|   5
-rw-r--r--	arch/powerpc/kernel/exceptions-64e.S		|   4
-rw-r--r--	arch/powerpc/kernel/head_fsl_booke.S		|  26
-rw-r--r--	arch/powerpc/kvm/book3s.c			| 158
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c			|  47
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S		|   3
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c			|   3
-rw-r--r--	arch/powerpc/kvm/booke.c			| 287
-rw-r--r--	arch/powerpc/kvm/booke.h			|  40
-rw-r--r--	arch/powerpc/kvm/booke_emulate.c		| 163
-rw-r--r--	arch/powerpc/kvm/bookehv_interrupts.S		|  13
-rw-r--r--	arch/powerpc/kvm/e500.h				|  20
-rw-r--r--	arch/powerpc/kvm/e500_emulate.c			|  20
-rw-r--r--	arch/powerpc/kvm/e500_mmu_host.c		|  18
-rw-r--r--	arch/powerpc/kvm/e500mc.c			|  60
-rw-r--r--	arch/powerpc/kvm/emulate.c			|  17
-rw-r--r--	arch/powerpc/kvm/emulate_loadstore.c		|   2
-rw-r--r--	arch/powerpc/kvm/powerpc.c			| 107
-rw-r--r--	arch/powerpc/platforms/Kconfig.cputype		|   6

27 files changed, 763 insertions(+), 300 deletions(-)
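
The centerpiece of the Book3S changes below is software-breakpoint support, driven from userspace through KVM_SET_GUEST_DEBUG. As a rough sketch of how a VMM would turn it on (enable_sw_breakpoints() is a hypothetical helper; vcpu_fd is assumed to come from KVM_CREATE_VCPU, and error handling is elided):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Ask KVM to report guest software breakpoints to userspace
     * instead of injecting a program interrupt into the guest. */
    static int enable_sw_breakpoints(int vcpu_fd)
    {
        struct kvm_guest_debug dbg = {
            .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP,
        };

        return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
    }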
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index f7735c72c128..7610eaa4d491 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1901,6 +1901,8 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_ARCH_COMPAT	| 32
   PPC   | KVM_REG_PPC_DABRX		| 32
   PPC   | KVM_REG_PPC_WORT		| 64
+  PPC   | KVM_REG_PPC_SPRG9		| 64
+  PPC   | KVM_REG_PPC_DBSR		| 32
   PPC   | KVM_REG_PPC_TM_GPR0		| 64
           ...
   PPC   | KVM_REG_PPC_TM_GPR31		| 64
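
Both new registers travel over the generic ONE_REG interface; the id encodes the transfer size, so DBSR is read into a 32-bit buffer while SPRG9 needs a 64-bit one. A minimal sketch (read_dbsr() is a hypothetical helper):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Fetch the guest-visible DBSR through KVM_GET_ONE_REG. */
    static int read_dbsr(int vcpu_fd, __u32 *dbsr)
    {
        struct kvm_one_reg reg = {
            .id   = KVM_REG_PPC_DBSR,
            .addr = (__u64)(unsigned long)dbsr,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }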
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 465dfcb82c92..5bca220bbb60 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -53,17 +53,17 @@
 #define BOOKE_INTERRUPT_DEBUG 15

 /* E500 */
-#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
-/*
- * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
- */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
-    BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#ifdef CONFIG_SPE_POSSIBLE
+#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA 33
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
+#endif
+
+#ifdef CONFIG_PPC_E500MC
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 33
+#endif
+
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 #define BOOKE_INTERRUPT_DOORBELL 36
 #define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index f7aa5cc395c4..3286f0d6a86c 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,15 +23,16 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>

-/* LPIDs we support with this build -- runtime limit may be lower */
+/*
+ * Number of available lpids. Only the low-order 6 bits of LPID rgister are
+ * implemented on e500mc+ cores.
+ */
 #define KVMPPC_NR_LPIDS 64

 #define KVMPPC_INST_EHPRIV 0x7c00021c
 #define EHPRIV_OC_SHIFT 11
 /* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
 #define EHPRIV_OC_DEBUG 1
-#define KVMPPC_INST_EHPRIV_DEBUG (KVMPPC_INST_EHPRIV | \
-                    (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))

 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
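
For reference, the deleted macro composed the "ehpriv 1" opcode by inserting the OC field into the base encoding; the series replaces its use with the architecture-neutral KVMPPC_INST_SW_BREAKPOINT added in kvm_ppc.h below. The arithmetic it performed:

    /* KVMPPC_INST_EHPRIV | (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT)
     *   = 0x7c00021c | (1 << 11)
     *   = 0x7c000a1c            ("ehpriv 1")
     */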
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2cf6c1587d43..047855619cc4 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -144,6 +144,7 @@ enum kvm_exit_types {
 	EMULATED_TLBWE_EXITS,
 	EMULATED_RFI_EXITS,
 	EMULATED_RFCI_EXITS,
+	EMULATED_RFDI_EXITS,
 	DEC_EXITS,
 	EXT_INTR_EXITS,
 	HALT_WAKEUP,
@@ -589,8 +590,6 @@ struct kvm_vcpu_arch {
 	u32 crit_save;
 	/* guest debug registers*/
 	struct debug_reg dbg_reg;
-	/* hardware visible debug registers when in guest state */
-	struct debug_reg shadow_dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
@@ -612,7 +611,6 @@ struct kvm_vcpu_arch {
 	u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */

 	struct hrtimer dec_timer;
-	struct tasklet_struct tasklet;
 	u64 dec_jiffies;
 	u64 dec_expires;
 	unsigned long pending_exceptions;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index d4a92d7cea6a..a6dcdb6d13c1 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -38,6 +38,12 @@
 #include <asm/paca.h>
 #endif

+/*
+ * KVMPPC_INST_SW_BREAKPOINT is debug Instruction
+ * for supporting software breakpoint.
+ */
+#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
+
 enum emulation_result {
 	EMULATE_DONE,         /* no further processing */
 	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
@@ -89,7 +95,7 @@ extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
-extern void kvmppc_decrementer_func(unsigned long data);
+extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
 extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
 extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
@@ -206,6 +212,9 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

+void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
+void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
+
 union kvmppc_one_reg {
 	u32 wval;
 	u64 dval;
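
The intended use of KVMPPC_INST_SW_BREAKPOINT, per the series, is that a debugger queries KVM_REG_PPC_DEBUG_INST and writes the returned opcode over the target instruction. A sketch under those assumptions (insn_hva is the guest instruction's host mapping; saving the original bytes is left to the caller):

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Plant the KVM-reported breakpoint opcode at a guest address
     * that is already mapped into this process. */
    static int set_sw_breakpoint(int vcpu_fd, void *insn_hva)
    {
        __u32 bp_insn;
        struct kvm_one_reg reg = {
            .id   = KVM_REG_PPC_DEBUG_INST,
            .addr = (__u64)(unsigned long)&bp_insn,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
            return -1;

        memcpy(insn_hva, &bp_insn, sizeof(bp_insn));  /* 0x00dddd00 */
        return 0;
    }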
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 1d653308a33c..16547efa2d5a 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -319,6 +319,8 @@
  * DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
  */
 #ifdef CONFIG_BOOKE
+#define DBSR_IDE	0x80000000	/* Imprecise Debug Event */
+#define DBSR_MRR	0x30000000	/* Most Recent Reset */
 #define DBSR_IC		0x08000000	/* Instruction Completion */
 #define DBSR_BT		0x04000000	/* Branch Taken */
 #define DBSR_IRPT	0x02000000	/* Exception Debug Event */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index e0e49dbb145d..ab4d4732c492 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -476,6 +476,11 @@ struct kvm_get_htab_header {
 
 /* FP and vector status/control registers */
 #define KVM_REG_PPC_FPSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+/*
+ * VSCR register is documented as a 32-bit register in the ISA, but it can
+ * only be accesses via a vector register. Expose VSCR as a 32-bit register
+ * even though the kernel represents it as a 128-bit vector.
+ */
 #define KVM_REG_PPC_VSCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)

 /* Virtual processor areas */
@@ -557,6 +562,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_DABRX	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
 #define KVM_REG_PPC_WORT	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
 #define KVM_REG_PPC_SPRG9	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
+#define KVM_REG_PPC_DBSR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)

 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
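
Each ONE_REG id packs the architecture, transfer size, and register index into one u64, which is how the core code knows DBSR is a 4-byte transfer. Decoding the new id as a worked example (field values as I understand them from uapi/linux/kvm.h):

    /* KVM_REG_PPC_DBSR = KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb
     *   KVM_REG_PPC      = 0x1000000000000000ULL   (architecture field)
     *   KVM_REG_SIZE_U32 = 0x0020000000000000ULL   (size field: 4 bytes)
     *   0xbb             = register index
     * giving an id of 0x10200000000000bbULL.
     */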
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 4f1393d20079..dddba3e94260 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -91,6 +91,7 @@ _GLOBAL(setup_altivec_idle)
 
 	blr

+#ifdef CONFIG_PPC_E500MC
 _GLOBAL(__setup_cpu_e6500)
 	mflr	r6
 #ifdef CONFIG_PPC64
@@ -107,14 +108,20 @@ _GLOBAL(__setup_cpu_e6500)
 	bl	__setup_cpu_e5500
 	mtlr	r6
 	blr
+#endif /* CONFIG_PPC_E500MC */

 #ifdef CONFIG_PPC32
+#ifdef CONFIG_E200
 _GLOBAL(__setup_cpu_e200)
 	/* enable dedicated debug exception handling resources (Debug APU) */
 	mfspr	r3,SPRN_HID0
 	ori	r3,r3,HID0_DAPUEN@l
 	mtspr	SPRN_HID0,r3
 	b	__setup_e200_ivors
+#endif /* CONFIG_E200 */
+
+#ifdef CONFIG_E500
+#ifndef CONFIG_PPC_E500MC
 _GLOBAL(__setup_cpu_e500v1)
 _GLOBAL(__setup_cpu_e500v2)
 	mflr	r4
@@ -129,6 +136,7 @@ _GLOBAL(__setup_cpu_e500v2)
 #endif
 	mtlr	r4
 	blr
+#else /* CONFIG_PPC_E500MC */
 _GLOBAL(__setup_cpu_e500mc)
 _GLOBAL(__setup_cpu_e5500)
 	mflr	r5
@@ -159,7 +167,9 @@ _GLOBAL(__setup_cpu_e5500)
 2:
 	mtlr	r5
 	blr
-#endif
+#endif /* CONFIG_PPC_E500MC */
+#endif /* CONFIG_E500 */
+#endif /* CONFIG_PPC32 */

 #ifdef CONFIG_PPC_BOOK3E_64
 _GLOBAL(__restore_cpu_e6500)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9b6dcaaec1a3..808405906336 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1961,6 +1961,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 #endif /* CONFIG_PPC32 */
 #ifdef CONFIG_E500
 #ifdef CONFIG_PPC32
+#ifndef CONFIG_PPC_E500MC
 	{	/* e500 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80200000,
@@ -2000,6 +2001,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check		= machine_check_e500,
 		.platform		= "ppc8548",
 	},
+#else
 	{	/* e500mc */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80230000,
@@ -2018,7 +2020,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce500mc",
 	},
+#endif /* CONFIG_PPC_E500MC */
 #endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC_E500MC
 	{	/* e5500 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80240000,
@@ -2062,6 +2066,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce6500",
 	},
+#endif /* CONFIG_PPC_E500MC */
 #ifdef CONFIG_PPC32
 	{	/* default match */
 		.pvr_mask		= 0x00000000,
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index bb9cac6c8051..3e68d1c69718 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -635,7 +635,7 @@ interrupt_end_book3e:
 
 /* Altivec Unavailable Interrupt */
 	START_EXCEPTION(altivec_unavailable);
-	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL,
+	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
 				PROLOG_ADDITION_NONE)
 	/* we can probably do a shorter exception entry for that one... */
 	EXCEPTION_COMMON(0x200)
@@ -658,7 +658,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 /* AltiVec Assist */
 	START_EXCEPTION(altivec_assist);
 	NORMAL_EXCEPTION_PROLOG(0x220,
-				BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST,
+				BOOKE_INTERRUPT_ALTIVEC_ASSIST,
 				PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x220)
 	INTS_DISABLE
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index b497188a94a1..fffd1f96bb1d 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -613,34 +613,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mfspr	r10, SPRN_SPRG_RSCRATCH0
 	b	InstructionStorage

+/* Define SPE handlers for e200 and e500v2 */
 #ifdef CONFIG_SPE
 	/* SPE Unavailable */
 	START_EXCEPTION(SPEUnavailable)
-	NORMAL_EXCEPTION_PROLOG(SPE_ALTIVEC_UNAVAIL)
+	NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
 	beq	1f
 	bl	load_up_spe
 	b	fast_exception_return
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
 	EXC_XFER_EE_LITE(0x2010, KernelSPE)
-#else
-	EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \
+#elif defined(CONFIG_SPE_POSSIBLE)
+	EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
 		  unknown_exception, EXC_XFER_EE)
-#endif /* CONFIG_SPE */
+#endif /* CONFIG_SPE_POSSIBLE */

 	/* SPE Floating Point Data */
 #ifdef CONFIG_SPE
-	EXCEPTION(0x2030, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
+	EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData,
 		  SPEFloatingPointException, EXC_XFER_EE)

 	/* SPE Floating Point Round */
 	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
 		  SPEFloatingPointRoundException, EXC_XFER_EE)
-#else
-	EXCEPTION(0x2040, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
+#elif defined(CONFIG_SPE_POSSIBLE)
+	EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData,
 		  unknown_exception, EXC_XFER_EE)
 	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
 		  unknown_exception, EXC_XFER_EE)
-#endif /* CONFIG_SPE */
+#endif /* CONFIG_SPE_POSSIBLE */
+

 	/* Performance Monitor */
 	EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
@@ -947,6 +949,7 @@ get_phys_addr:
  * Global functions
  */

+#ifdef CONFIG_E200
 /* Adjust or setup IVORs for e200 */
 _GLOBAL(__setup_e200_ivors)
 	li	r3,DebugDebug@l
@@ -959,7 +962,10 @@ _GLOBAL(__setup_e200_ivors)
 	mtspr	SPRN_IVOR34,r3
 	sync
 	blr
+#endif

+#ifdef CONFIG_E500
+#ifndef CONFIG_PPC_E500MC
 /* Adjust or setup IVORs for e500v1/v2 */
 _GLOBAL(__setup_e500_ivors)
 	li	r3,DebugCrit@l
@@ -974,7 +980,7 @@ _GLOBAL(__setup_e500_ivors)
 	mtspr	SPRN_IVOR35,r3
 	sync
 	blr
-
+#else
 /* Adjust or setup IVORs for e500mc */
 _GLOBAL(__setup_e500mc_ivors)
 	li	r3,DebugDebug@l
@@ -1000,6 +1006,8 @@ _GLOBAL(__setup_ehv_ivors)
 	mtspr	SPRN_IVOR41,r3
 	sync
 	blr
+#endif /* CONFIG_PPC_E500MC */
+#endif /* CONFIG_E500 */

 #ifdef CONFIG_SPE
 /*
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c16cfbfeb781..b32db4b95361 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -535,174 +535,111 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	return -ENOTSUPP;
 }

-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+			union kvmppc_one_reg *val)
 {
-	int r;
-	union kvmppc_one_reg val;
-	int size;
+	int r = 0;
 	long int i;

-	size = one_reg_size(reg->id);
-	if (size > sizeof(val))
-		return -EINVAL;
-
-	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
+	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
 	if (r == -EINVAL) {
 		r = 0;
-		switch (reg->id) {
+		switch (id) {
 		case KVM_REG_PPC_DAR:
-			val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
+			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
 			break;
 		case KVM_REG_PPC_DSISR:
-			val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
+			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
 			break;
 		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-			i = reg->id - KVM_REG_PPC_FPR0;
-			val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
+			i = id - KVM_REG_PPC_FPR0;
+			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
 			break;
 		case KVM_REG_PPC_FPSCR:
-			val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
-			break;
-#ifdef CONFIG_ALTIVEC
-		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
-			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-				r = -ENXIO;
-				break;
-			}
-			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
-			break;
-		case KVM_REG_PPC_VSCR:
-			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-				r = -ENXIO;
-				break;
-			}
-			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
 			break;
-		case KVM_REG_PPC_VRSAVE:
-			val = get_reg_val(reg->id, vcpu->arch.vrsave);
-			break;
-#endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
 		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
 			if (cpu_has_feature(CPU_FTR_VSX)) {
-				long int i = reg->id - KVM_REG_PPC_VSR0;
-				val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
-				val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
+				i = id - KVM_REG_PPC_VSR0;
+				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
+				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
 			} else {
 				r = -ENXIO;
 			}
 			break;
 #endif /* CONFIG_VSX */
-		case KVM_REG_PPC_DEBUG_INST: {
-			u32 opcode = INS_TW;
-			r = copy_to_user((u32 __user *)(long)reg->addr,
-					 &opcode, sizeof(u32));
+		case KVM_REG_PPC_DEBUG_INST:
+			*val = get_reg_val(id, INS_TW);
 			break;
-		}
 #ifdef CONFIG_KVM_XICS
 		case KVM_REG_PPC_ICP_STATE:
 			if (!vcpu->arch.icp) {
 				r = -ENXIO;
 				break;
 			}
-			val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
+			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
 			break;
 #endif /* CONFIG_KVM_XICS */
 		case KVM_REG_PPC_FSCR:
-			val = get_reg_val(reg->id, vcpu->arch.fscr);
+			*val = get_reg_val(id, vcpu->arch.fscr);
 			break;
 		case KVM_REG_PPC_TAR:
-			val = get_reg_val(reg->id, vcpu->arch.tar);
+			*val = get_reg_val(id, vcpu->arch.tar);
 			break;
 		case KVM_REG_PPC_EBBHR:
-			val = get_reg_val(reg->id, vcpu->arch.ebbhr);
+			*val = get_reg_val(id, vcpu->arch.ebbhr);
 			break;
 		case KVM_REG_PPC_EBBRR:
-			val = get_reg_val(reg->id, vcpu->arch.ebbrr);
+			*val = get_reg_val(id, vcpu->arch.ebbrr);
 			break;
 		case KVM_REG_PPC_BESCR:
-			val = get_reg_val(reg->id, vcpu->arch.bescr);
+			*val = get_reg_val(id, vcpu->arch.bescr);
 			break;
 		case KVM_REG_PPC_VTB:
-			val = get_reg_val(reg->id, vcpu->arch.vtb);
+			*val = get_reg_val(id, vcpu->arch.vtb);
 			break;
 		case KVM_REG_PPC_IC:
-			val = get_reg_val(reg->id, vcpu->arch.ic);
+			*val = get_reg_val(id, vcpu->arch.ic);
 			break;
 		default:
 			r = -EINVAL;
 			break;
 		}
 	}
-	if (r)
-		return r;
-
-	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
-		r = -EFAULT;

 	return r;
 }
 
-int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+			union kvmppc_one_reg *val)
 {
-	int r;
-	union kvmppc_one_reg val;
-	int size;
+	int r = 0;
 	long int i;

-	size = one_reg_size(reg->id);
-	if (size > sizeof(val))
-		return -EINVAL;
-
-	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
-		return -EFAULT;
-
-	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
+	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
 	if (r == -EINVAL) {
 		r = 0;
-		switch (reg->id) {
+		switch (id) {
 		case KVM_REG_PPC_DAR:
-			kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
+			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
 			break;
 		case KVM_REG_PPC_DSISR:
-			kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
+			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
 			break;
 		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-			i = reg->id - KVM_REG_PPC_FPR0;
-			VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
+			i = id - KVM_REG_PPC_FPR0;
+			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
 			break;
 		case KVM_REG_PPC_FPSCR:
-			vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
-			break;
-#ifdef CONFIG_ALTIVEC
-		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
-			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-				r = -ENXIO;
-				break;
-			}
-			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
-			break;
-		case KVM_REG_PPC_VSCR:
-			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-				r = -ENXIO;
-				break;
-			}
-			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
-			break;
-		case KVM_REG_PPC_VRSAVE:
-			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-				r = -ENXIO;
-				break;
-			}
-			vcpu->arch.vrsave = set_reg_val(reg->id, val);
+			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
 			break;
-#endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
 		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
 			if (cpu_has_feature(CPU_FTR_VSX)) {
-				long int i = reg->id - KVM_REG_PPC_VSR0;
-				vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
-				vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
+				i = id - KVM_REG_PPC_VSR0;
+				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
+				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
 			} else {
 				r = -ENXIO;
 			}
@@ -715,29 +652,29 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 			break;
 		}
 		r = kvmppc_xics_set_icp(vcpu,
-					set_reg_val(reg->id, val));
+					set_reg_val(id, *val));
 		break;
 #endif /* CONFIG_KVM_XICS */
 	case KVM_REG_PPC_FSCR:
-		vcpu->arch.fscr = set_reg_val(reg->id, val);
+		vcpu->arch.fscr = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_TAR:
-		vcpu->arch.tar = set_reg_val(reg->id, val);
+		vcpu->arch.tar = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_EBBHR:
-		vcpu->arch.ebbhr = set_reg_val(reg->id, val);
+		vcpu->arch.ebbhr = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_EBBRR:
-		vcpu->arch.ebbrr = set_reg_val(reg->id, val);
+		vcpu->arch.ebbrr = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_BESCR:
-		vcpu->arch.bescr = set_reg_val(reg->id, val);
+		vcpu->arch.bescr = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_VTB:
-		vcpu->arch.vtb = set_reg_val(reg->id, val);
+		vcpu->arch.vtb = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_IC:
-		vcpu->arch.ic = set_reg_val(reg->id, val);
+		vcpu->arch.ic = set_reg_val(id, *val);
 		break;
 	default:
 		r = -EINVAL;
 		break;
778int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 715int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
779 struct kvm_guest_debug *dbg) 716 struct kvm_guest_debug *dbg)
780{ 717{
781 return -EINVAL; 718 vcpu->guest_debug = dbg->control;
719 return 0;
782} 720}
783 721
784void kvmppc_decrementer_func(unsigned long data) 722void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
785{ 723{
786 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
787
788 kvmppc_core_queue_dec(vcpu); 724 kvmppc_core_queue_dec(vcpu);
789 kvm_vcpu_kick(vcpu); 725 kvm_vcpu_kick(vcpu);
790} 726}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 27cced9c7249..e63587d30b70 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -725,6 +725,30 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
 	return kvmppc_hcall_impl_hv_realmode(cmd);
 }

+static int kvmppc_emulate_debug_inst(struct kvm_run *run,
+		struct kvm_vcpu *vcpu)
+{
+	u32 last_inst;
+
+	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
+			EMULATE_DONE) {
+		/*
+		 * Fetch failed, so return to guest and
+		 * try executing it again.
+		 */
+		return RESUME_GUEST;
+	}
+
+	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
+		run->exit_reason = KVM_EXIT_DEBUG;
+		run->debug.arch.address = kvmppc_get_pc(vcpu);
+		return RESUME_HOST;
+	} else {
+		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+		return RESUME_GUEST;
+	}
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				 struct task_struct *tsk)
 {
@@ -807,12 +831,18 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 	/*
 	 * This occurs if the guest executes an illegal instruction.
-	 * We just generate a program interrupt to the guest, since
-	 * we don't emulate any guest instructions at this stage.
+	 * If the guest debug is disabled, generate a program interrupt
+	 * to the guest. If guest debug is enabled, we need to check
+	 * whether the instruction is a software breakpoint instruction.
+	 * Accordingly return to Guest or Host.
 	 */
 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
-		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-		r = RESUME_GUEST;
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
+			r = kvmppc_emulate_debug_inst(run, vcpu);
+		} else {
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			r = RESUME_GUEST;
+		}
 		break;
 	/*
 	 * This occurs if the guest (kernel or userspace), does something that
@@ -856,7 +886,9 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
 {
 	int i, j;

-	kvmppc_set_pvr_hv(vcpu, sregs->pvr);
+	/* Only accept the same PVR as the host's, since we can't spoof it */
+	if (sregs->pvr != vcpu->arch.pvr)
+		return -EINVAL;

 	j = 0;
 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
@@ -922,6 +954,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	long int i;

 	switch (id) {
+	case KVM_REG_PPC_DEBUG_INST:
+		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
+		break;
 	case KVM_REG_PPC_HIOR:
 		*val = get_reg_val(id, 0);
 		break;
@@ -1489,7 +1524,7 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 static int kvmppc_grab_hwthread(int cpu)
 {
 	struct paca_struct *tpaca;
-	long timeout = 1000;
+	long timeout = 10000;

 	tpaca = &paca[cpu];

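
On the userspace side of the new RESUME_HOST path above, the breakpoint surfaces as a KVM_EXIT_DEBUG exit with the guest PC in run->debug.arch.address (the field this very patch fills in). A minimal run-loop fragment, assuming vcpu_fd and the mmap'ed kvm_run structure are already set up:

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static void run_vcpu_once(int vcpu_fd, struct kvm_run *run)
    {
        if (ioctl(vcpu_fd, KVM_RUN, NULL) < 0)
            return;

        if (run->exit_reason == KVM_EXIT_DEBUG)
            printf("sw breakpoint at guest pc 0x%llx\n",
                   (unsigned long long)run->debug.arch.address);
    }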
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index f0c4db7704c3..edb2ccdbb2ba 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -355,6 +355,7 @@ kvmppc_hv_entry:
 	 * MSR = ~IR|DR
 	 * R13 = PACA
 	 * R1 = host R1
+	 * R2 = TOC
 	 * all other volatile GPRS = free
 	 */
 	mflr	r0
@@ -503,7 +504,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 toc_tlbie_lock:
 	.tc	native_tlbie_lock[TC],native_tlbie_lock
 	.previous
-	ld	r3,toc_tlbie_lock@toc(2)
+	ld	r3,toc_tlbie_lock@toc(r2)
 #ifdef __BIG_ENDIAN__
 	lwz	r8,PACA_LOCK_TOKEN(r13)
 #else
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 852fcd8951c4..cf2eb16846d1 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1320,6 +1320,9 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 	int r = 0;

 	switch (id) {
+	case KVM_REG_PPC_DEBUG_INST:
+		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
+		break;
 	case KVM_REG_PPC_HIOR:
 		*val = get_reg_val(id, to_book3s(vcpu)->hior);
 		break;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index b4c89fa6f109..9b55dec2d6cc 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,6 +124,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif

+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also set the MSR_FP in thread so that host know
+ * we're holding FPU, and then host can help to save
+ * guest vcpu FP state if other threads require to use FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+	if (!(current->thread.regs->msr & MSR_FP)) {
+		enable_kernel_fp();
+		load_fp_state(&vcpu->arch.fp);
+		current->thread.fp_save_area = &vcpu->arch.fp;
+		current->thread.regs->msr |= MSR_FP;
+	}
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+	if (current->thread.regs->msr & MSR_FP)
+		giveup_fpu(current);
+	current->thread.fp_save_area = NULL;
+#endif
+}
+
 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 {
 #if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
@@ -134,6 +168,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }

+/*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		if (!(current->thread.regs->msr & MSR_VEC)) {
+			enable_kernel_altivec();
+			load_vr_state(&vcpu->arch.vr);
+			current->thread.vr_save_area = &vcpu->arch.vr;
+			current->thread.regs->msr |= MSR_VEC;
+		}
+	}
+#endif
+}
+
+/*
+ * Save guest vcpu AltiVec state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		if (current->thread.regs->msr & MSR_VEC)
+			giveup_altivec(current);
+		current->thread.vr_save_area = NULL;
+	}
+#endif
+}
+
 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
 {
 	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
@@ -267,6 +335,16 @@ static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
 	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
 }

+void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
+{
+	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
+}
+
+void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
+{
+	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
+}
+
 static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
 {
 	kvmppc_set_srr0(vcpu, srr0);
@@ -341,9 +419,15 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	case BOOKE_IRQPRIO_ITLB_MISS:
 	case BOOKE_IRQPRIO_SYSCALL:
 	case BOOKE_IRQPRIO_FP_UNAVAIL:
+#ifdef CONFIG_SPE_POSSIBLE
 	case BOOKE_IRQPRIO_SPE_UNAVAIL:
 	case BOOKE_IRQPRIO_SPE_FP_DATA:
 	case BOOKE_IRQPRIO_SPE_FP_ROUND:
+#endif
+#ifdef CONFIG_ALTIVEC
+	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
+	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
+#endif
 	case BOOKE_IRQPRIO_AP_UNAVAIL:
 		allowed = 1;
 		msr_mask = MSR_CE | MSR_ME | MSR_DE;
@@ -377,7 +461,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 		allowed = vcpu->arch.shared->msr & MSR_DE;
 		allowed = allowed && !crit;
 		msr_mask = MSR_ME;
-		int_class = INT_CLASS_CRIT;
+		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
+			int_class = INT_CLASS_DBG;
+		else
+			int_class = INT_CLASS_CRIT;
+
 		break;
 	}

@@ -654,20 +742,27 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	/*
 	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
-	 * as always using the FPU. Kernel usage of FP (via
-	 * enable_kernel_fp()) in this thread must not occur while
-	 * vcpu->fpu_active is set.
+	 * as always using the FPU.
 	 */
-	vcpu->fpu_active = 1;
-
 	kvmppc_load_guest_fp(vcpu);
 #endif

+#ifdef CONFIG_ALTIVEC
+	/* Save userspace AltiVec state in stack */
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		enable_kernel_altivec();
+	/*
+	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
+	 * as always using the AltiVec.
+	 */
+	kvmppc_load_guest_altivec(vcpu);
+#endif
+
 	/* Switch to guest debug context */
-	debug = vcpu->arch.shadow_dbg_reg;
+	debug = vcpu->arch.dbg_reg;
 	switch_booke_debug_regs(&debug);
 	debug = current->thread.debug;
-	current->thread.debug = vcpu->arch.shadow_dbg_reg;
+	current->thread.debug = vcpu->arch.dbg_reg;

 	vcpu->arch.pgdir = current->mm->pgd;
 	kvmppc_fix_ee_before_entry();
@@ -683,8 +778,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);
+#endif

-	vcpu->fpu_active = 0;
+#ifdef CONFIG_ALTIVEC
+	kvmppc_save_guest_altivec(vcpu);
 #endif

 out:
@@ -728,9 +825,36 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
+	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
 	u32 dbsr = vcpu->arch.dbsr;

+	if (vcpu->guest_debug == 0) {
+		/*
+		 * Debug resources belong to Guest.
+		 * Imprecise debug event is not injected
+		 */
+		if (dbsr & DBSR_IDE) {
+			dbsr &= ~DBSR_IDE;
+			if (!dbsr)
+				return RESUME_GUEST;
+		}
+
+		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
+			    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
+			kvmppc_core_queue_debug(vcpu);
+
+		/* Inject a program interrupt if trap debug is not allowed */
+		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
+			kvmppc_core_queue_program(vcpu, ESR_PTR);
+
+		return RESUME_GUEST;
+	}
+
+	/*
+	 * Debug resource owned by userspace.
+	 * Clear guest dbsr (vcpu->arch.dbsr)
+	 */
+	vcpu->arch.dbsr = 0;
 	run->debug.arch.status = 0;
 	run->debug.arch.address = vcpu->arch.pc;

@@ -868,7 +992,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOKE_INTERRUPT_DATA_STORAGE:
 	case BOOKE_INTERRUPT_DTLB_MISS:
 	case BOOKE_INTERRUPT_HV_PRIV:
-		emulated = kvmppc_get_last_inst(vcpu, false, &last_inst);
+		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+		break;
+	case BOOKE_INTERRUPT_PROGRAM:
+		/* SW breakpoints arrive as illegal instructions on HV */
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
 		break;
 	default:
 		break;
@@ -947,6 +1076,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;

 	case BOOKE_INTERRUPT_PROGRAM:
+		if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
+			(last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
+			/*
+			 * We are here because of an SW breakpoint instr,
+			 * so lets return to host to handle.
+			 */
+			r = kvmppc_handle_debug(run, vcpu);
+			run->exit_reason = KVM_EXIT_DEBUG;
+			kvmppc_account_exit(vcpu, DEBUG_EXITS);
+			break;
+		}
+
 		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
 			/*
 			 * Program traps generated by user-level software must
@@ -991,7 +1132,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
 		r = RESUME_GUEST;
 		break;
-#else
+#elif defined(CONFIG_SPE_POSSIBLE)
 	case BOOKE_INTERRUPT_SPE_UNAVAIL:
 		/*
 		 * Guest wants SPE, but host kernel doesn't support it. Send
@@ -1012,6 +1153,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		run->hw.hardware_exit_reason = exit_nr;
 		r = RESUME_HOST;
 		break;
+#endif /* CONFIG_SPE_POSSIBLE */
+
+/*
+ * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
+ * see kvmppc_core_check_processor_compat().
+ */
+#ifdef CONFIG_ALTIVEC
+	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
+		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+		r = RESUME_GUEST;
+		break;
+
+	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
+		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
+		r = RESUME_GUEST;
+		break;
 #endif

 	case BOOKE_INTERRUPT_DATA_STORAGE:
@@ -1188,6 +1345,8 @@ out:
 	else {
 		/* interrupts now hard-disabled */
 		kvmppc_fix_ee_before_entry();
+		kvmppc_load_guest_fp(vcpu);
+		kvmppc_load_guest_altivec(vcpu);
 	}
 }

@@ -1243,6 +1402,11 @@ int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
 	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
 		    (unsigned long)vcpu);

+	/*
+	 * Clear DBSR.MRR to avoid guest debug interrupt as
+	 * this is of host interest
+	 */
+	mtspr(SPRN_DBSR, DBSR_MRR);
 	return 0;
 }

@@ -1457,144 +1621,125 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
 }

-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+			union kvmppc_one_reg *val)
 {
 	int r = 0;
-	union kvmppc_one_reg val;
-	int size;
-
-	size = one_reg_size(reg->id);
-	if (size > sizeof(val))
-		return -EINVAL;

-	switch (reg->id) {
+	switch (id) {
 	case KVM_REG_PPC_IAC1:
-		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
+		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
 		break;
 	case KVM_REG_PPC_IAC2:
-		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
+		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
 		break;
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 	case KVM_REG_PPC_IAC3:
-		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
+		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
 		break;
 	case KVM_REG_PPC_IAC4:
-		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
+		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
 		break;
 #endif
 	case KVM_REG_PPC_DAC1:
-		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
+		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
 		break;
 	case KVM_REG_PPC_DAC2:
-		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
+		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
 		break;
 	case KVM_REG_PPC_EPR: {
 		u32 epr = kvmppc_get_epr(vcpu);
-		val = get_reg_val(reg->id, epr);
+		*val = get_reg_val(id, epr);
 		break;
 	}
 #if defined(CONFIG_64BIT)
 	case KVM_REG_PPC_EPCR:
-		val = get_reg_val(reg->id, vcpu->arch.epcr);
+		*val = get_reg_val(id, vcpu->arch.epcr);
 		break;
 #endif
 	case KVM_REG_PPC_TCR:
-		val = get_reg_val(reg->id, vcpu->arch.tcr);
+		*val = get_reg_val(id, vcpu->arch.tcr);
 		break;
 	case KVM_REG_PPC_TSR:
-		val = get_reg_val(reg->id, vcpu->arch.tsr);
+		*val = get_reg_val(id, vcpu->arch.tsr);
 		break;
 	case KVM_REG_PPC_DEBUG_INST:
-		val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
+		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
 		break;
 	case KVM_REG_PPC_VRSAVE:
-		val = get_reg_val(reg->id, vcpu->arch.vrsave);
+		*val = get_reg_val(id, vcpu->arch.vrsave);
 		break;
 	default:
-		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
+		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
 		break;
 	}

-	if (r)
-		return r;
-
-	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
-		r = -EFAULT;
-
 	return r;
 }

-int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+			union kvmppc_one_reg *val)
 {
 	int r = 0;
-	union kvmppc_one_reg val;
-	int size;

-	size = one_reg_size(reg->id);
-	if (size > sizeof(val))
-		return -EINVAL;
-
-	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
-		return -EFAULT;
-
-	switch (reg->id) {
+	switch (id) {
 	case KVM_REG_PPC_IAC1:
-		vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
+		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_IAC2:
-		vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
+		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
 		break;
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 	case KVM_REG_PPC_IAC3:
-		vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
+		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_IAC4:
-		vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
+		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
 		break;
 #endif
 	case KVM_REG_PPC_DAC1:
-		vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
+		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_DAC2:
-		vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
+		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_EPR: {
-		u32 new_epr = set_reg_val(reg->id, val);
+		u32 new_epr = set_reg_val(id, *val);
 		kvmppc_set_epr(vcpu, new_epr);
 		break;
 	}
 #if defined(CONFIG_64BIT)
 	case KVM_REG_PPC_EPCR: {
-		u32 new_epcr = set_reg_val(reg->id, val);
+		u32 new_epcr = set_reg_val(id, *val);
 		kvmppc_set_epcr(vcpu, new_epcr);
 		break;
 	}
 #endif
 	case KVM_REG_PPC_OR_TSR: {
-		u32 tsr_bits = set_reg_val(reg->id, val);
+		u32 tsr_bits = set_reg_val(id, *val);
 		kvmppc_set_tsr_bits(vcpu, tsr_bits);
 		break;
 	}
 	case KVM_REG_PPC_CLEAR_TSR: {
-		u32 tsr_bits = set_reg_val(reg->id, val);
+		u32 tsr_bits = set_reg_val(id, *val);
 		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
 		break;
 	}
 	case KVM_REG_PPC_TSR: {
-		u32 tsr = set_reg_val(reg->id, val);
+		u32 tsr = set_reg_val(id, *val);
 		kvmppc_set_tsr(vcpu, tsr);
 		break;
 	}
 	case KVM_REG_PPC_TCR: {
-		u32 tcr = set_reg_val(reg->id, val);
+		u32 tcr = set_reg_val(id, *val);
 		kvmppc_set_tcr(vcpu, tcr);
 		break;
 	}
 	case KVM_REG_PPC_VRSAVE:
-		vcpu->arch.vrsave = set_reg_val(reg->id, val);
+		vcpu->arch.vrsave = set_reg_val(id, *val);
 		break;
 	default:
-		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
+		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
 		break;
 	}

@@ -1694,10 +1839,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
 	update_timer_ints(vcpu);
 }

-void kvmppc_decrementer_func(unsigned long data)
+void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
 	if (vcpu->arch.tcr & TCR_ARE) {
 		vcpu->arch.dec = vcpu->arch.decar;
 		kvmppc_emulate_dec(vcpu);
@@ -1842,7 +1985,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1842 int n, b = 0, w = 0; 1985 int n, b = 0, w = 0;
1843 1986
1844 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) { 1987 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
1845 vcpu->arch.shadow_dbg_reg.dbcr0 = 0; 1988 vcpu->arch.dbg_reg.dbcr0 = 0;
1846 vcpu->guest_debug = 0; 1989 vcpu->guest_debug = 0;
1847 kvm_guest_protect_msr(vcpu, MSR_DE, false); 1990 kvm_guest_protect_msr(vcpu, MSR_DE, false);
1848 return 0; 1991 return 0;
@@ -1850,15 +1993,13 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1850 1993
1851 kvm_guest_protect_msr(vcpu, MSR_DE, true); 1994 kvm_guest_protect_msr(vcpu, MSR_DE, true);
1852 vcpu->guest_debug = dbg->control; 1995 vcpu->guest_debug = dbg->control;
1853 vcpu->arch.shadow_dbg_reg.dbcr0 = 0; 1996 vcpu->arch.dbg_reg.dbcr0 = 0;
1854 /* Set DBCR0_EDM in guest visible DBCR0 register. */
1855 vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
1856 1997
1857 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 1998 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1858 vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1999 vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1859 2000
1860 /* Code below handles only HW breakpoints */ 2001 /* Code below handles only HW breakpoints */
1861 dbg_reg = &(vcpu->arch.shadow_dbg_reg); 2002 dbg_reg = &(vcpu->arch.dbg_reg);
1862 2003
1863#ifdef CONFIG_KVM_BOOKE_HV 2004#ifdef CONFIG_KVM_BOOKE_HV
1864 /* 2005 /*
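
The guest-debug rework above funnels all breakpoint and single-step state through the shared vcpu->arch.dbg_reg, with DBCR0_EDM now synthesized at mfspr time instead of being stored. As a hedged userspace sketch of what drives this path (vcpu_fd is assumed to be a KVM vcpu file descriptor obtained via KVM_CREATE_VCPU):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Minimal sketch: ask KVM to single-step the guest. With the hunk
 * above, this clears dbg_reg.dbcr0 and then sets DBCR0_IDM | DBCR0_IC
 * for the instruction-complete debug event. */
static int enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
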
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index f753543c56fa..22ba08ea68e9 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -32,9 +32,15 @@
32#define BOOKE_IRQPRIO_ALIGNMENT 2 32#define BOOKE_IRQPRIO_ALIGNMENT 2
33#define BOOKE_IRQPRIO_PROGRAM 3 33#define BOOKE_IRQPRIO_PROGRAM 3
34#define BOOKE_IRQPRIO_FP_UNAVAIL 4 34#define BOOKE_IRQPRIO_FP_UNAVAIL 4
35#ifdef CONFIG_SPE_POSSIBLE
35#define BOOKE_IRQPRIO_SPE_UNAVAIL 5 36#define BOOKE_IRQPRIO_SPE_UNAVAIL 5
36#define BOOKE_IRQPRIO_SPE_FP_DATA 6 37#define BOOKE_IRQPRIO_SPE_FP_DATA 6
37#define BOOKE_IRQPRIO_SPE_FP_ROUND 7 38#define BOOKE_IRQPRIO_SPE_FP_ROUND 7
39#endif
40#ifdef CONFIG_PPC_E500MC
41#define BOOKE_IRQPRIO_ALTIVEC_UNAVAIL 5
42#define BOOKE_IRQPRIO_ALTIVEC_ASSIST 6
43#endif
38#define BOOKE_IRQPRIO_SYSCALL 8 44#define BOOKE_IRQPRIO_SYSCALL 8
39#define BOOKE_IRQPRIO_AP_UNAVAIL 9 45#define BOOKE_IRQPRIO_AP_UNAVAIL 9
40#define BOOKE_IRQPRIO_DTLB_MISS 10 46#define BOOKE_IRQPRIO_DTLB_MISS 10
@@ -116,40 +122,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
116extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, 122extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
117 ulong *spr_val); 123 ulong *spr_val);
118 124
119/*
120 * Load up guest vcpu FP state if it's needed.
121 * It also set the MSR_FP in thread so that host know
122 * we're holding FPU, and then host can help to save
123 * guest vcpu FP state if other threads require to use FPU.
124 * This simulates an FP unavailable fault.
125 *
126 * It requires to be called with preemption disabled.
127 */
128static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
129{
130#ifdef CONFIG_PPC_FPU
131 if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
132 enable_kernel_fp();
133 load_fp_state(&vcpu->arch.fp);
134 current->thread.fp_save_area = &vcpu->arch.fp;
135 current->thread.regs->msr |= MSR_FP;
136 }
137#endif
138}
139
140/*
141 * Save guest vcpu FP state into thread.
142 * It requires to be called with preemption disabled.
143 */
144static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
145{
146#ifdef CONFIG_PPC_FPU
147 if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
148 giveup_fpu(current);
149 current->thread.fp_save_area = NULL;
150#endif
151}
152
153static inline void kvmppc_clear_dbsr(void) 125static inline void kvmppc_clear_dbsr(void)
154{ 126{
155 mtspr(SPRN_DBSR, mfspr(SPRN_DBSR)); 127 mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
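
The overlapping priority numbers above (SPE and AltiVec both claiming slots 5 and 6) are safe only because CONFIG_SPE_POSSIBLE and CONFIG_PPC_E500MC can never be enabled together; SPE_POSSIBLE depends on E200 || (E500 && !PPC_E500MC), per the Kconfig hunk at the end of this patch. A build-time guard along these lines would make that assumption explicit (illustrative only, not part of the patch):

#if defined(CONFIG_SPE_POSSIBLE) && defined(CONFIG_PPC_E500MC)
#error "SPE and e500mc AltiVec IRQ priorities both claim slots 5 and 6"
#endif
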
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 28c158881d23..a82f64502de1 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -25,6 +25,7 @@
25 25
26#define OP_19_XOP_RFI 50 26#define OP_19_XOP_RFI 50
27#define OP_19_XOP_RFCI 51 27#define OP_19_XOP_RFCI 51
28#define OP_19_XOP_RFDI 39
28 29
29#define OP_31_XOP_MFMSR 83 30#define OP_31_XOP_MFMSR 83
30#define OP_31_XOP_WRTEE 131 31#define OP_31_XOP_WRTEE 131
@@ -37,6 +38,12 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
37 kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); 38 kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
38} 39}
39 40
41static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
42{
43 vcpu->arch.pc = vcpu->arch.dsrr0;
44 kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
45}
46
40static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) 47static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
41{ 48{
42 vcpu->arch.pc = vcpu->arch.csrr0; 49 vcpu->arch.pc = vcpu->arch.csrr0;
@@ -65,6 +72,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
65 *advance = 0; 72 *advance = 0;
66 break; 73 break;
67 74
75 case OP_19_XOP_RFDI:
76 kvmppc_emul_rfdi(vcpu);
77 kvmppc_set_exit_type(vcpu, EMULATED_RFDI_EXITS);
78 *advance = 0;
79 break;
80
68 default: 81 default:
69 emulated = EMULATE_FAIL; 82 emulated = EMULATE_FAIL;
70 break; 83 break;
@@ -118,6 +131,7 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
118int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) 131int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
119{ 132{
120 int emulated = EMULATE_DONE; 133 int emulated = EMULATE_DONE;
134 bool debug_inst = false;
121 135
122 switch (sprn) { 136 switch (sprn) {
123 case SPRN_DEAR: 137 case SPRN_DEAR:
@@ -132,14 +146,128 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
132 case SPRN_CSRR1: 146 case SPRN_CSRR1:
133 vcpu->arch.csrr1 = spr_val; 147 vcpu->arch.csrr1 = spr_val;
134 break; 148 break;
149 case SPRN_DSRR0:
150 vcpu->arch.dsrr0 = spr_val;
151 break;
152 case SPRN_DSRR1:
153 vcpu->arch.dsrr1 = spr_val;
154 break;
155 case SPRN_IAC1:
156 /*
157 * If userspace is debugging the guest, the guest
158 * cannot access debug registers.

159 */
160 if (vcpu->guest_debug)
161 break;
162
163 debug_inst = true;
164 vcpu->arch.dbg_reg.iac1 = spr_val;
165 break;
166 case SPRN_IAC2:
167 /*
168 * If userspace is debugging the guest, the guest
169 * cannot access debug registers.
170 */
171 if (vcpu->guest_debug)
172 break;
173
174 debug_inst = true;
175 vcpu->arch.dbg_reg.iac2 = spr_val;
176 break;
177#if CONFIG_PPC_ADV_DEBUG_IACS > 2
178 case SPRN_IAC3:
179 /*
180 * If userspace is debugging the guest, the guest
181 * cannot access debug registers.
182 */
183 if (vcpu->guest_debug)
184 break;
185
186 debug_inst = true;
187 vcpu->arch.dbg_reg.iac3 = spr_val;
188 break;
189 case SPRN_IAC4:
190 /*
191 * If userspace is debugging the guest, the guest
192 * cannot access debug registers.
193 */
194 if (vcpu->guest_debug)
195 break;
196
197 debug_inst = true;
198 vcpu->arch.dbg_reg.iac4 = spr_val;
199 break;
200#endif
201 case SPRN_DAC1:
202 /*
203 * If userspace is debugging the guest, the guest
204 * cannot access debug registers.
205 */
206 if (vcpu->guest_debug)
207 break;
208
209 debug_inst = true;
210 vcpu->arch.dbg_reg.dac1 = spr_val;
211 break;
212 case SPRN_DAC2:
213 /*
214 * If userspace is debugging the guest, the guest
215 * cannot access debug registers.
216 */
217 if (vcpu->guest_debug)
218 break;
219
220 debug_inst = true;
221 vcpu->arch.dbg_reg.dac2 = spr_val;
222 break;
135 case SPRN_DBCR0: 223 case SPRN_DBCR0:
224 /*
225 * If userspace is debugging the guest, the guest
226 * cannot access debug registers.
227 */
228 if (vcpu->guest_debug)
229 break;
230
231 debug_inst = true;
232 spr_val &= (DBCR0_IDM | DBCR0_IC | DBCR0_BT | DBCR0_TIE |
233 DBCR0_IAC1 | DBCR0_IAC2 | DBCR0_IAC3 | DBCR0_IAC4 |
234 DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W);
235
136 vcpu->arch.dbg_reg.dbcr0 = spr_val; 236 vcpu->arch.dbg_reg.dbcr0 = spr_val;
137 break; 237 break;
138 case SPRN_DBCR1: 238 case SPRN_DBCR1:
239 /*
240 * If userspace is debugging the guest, the guest
241 * cannot access debug registers.
242 */
243 if (vcpu->guest_debug)
244 break;
245
246 debug_inst = true;
139 vcpu->arch.dbg_reg.dbcr1 = spr_val; 247 vcpu->arch.dbg_reg.dbcr1 = spr_val;
140 break; 248 break;
249 case SPRN_DBCR2:
250 /*
251 * If userspace is debugging the guest, the guest
252 * cannot access debug registers.
253 */
254 if (vcpu->guest_debug)
255 break;
256
257 debug_inst = true;
258 vcpu->arch.dbg_reg.dbcr2 = spr_val;
259 break;
141 case SPRN_DBSR: 260 case SPRN_DBSR:
261 /*
262 * If userspace is debugging the guest, the guest
263 * cannot access debug registers.
264 */
265 if (vcpu->guest_debug)
266 break;
267
142 vcpu->arch.dbsr &= ~spr_val; 268 vcpu->arch.dbsr &= ~spr_val;
269 if (!(vcpu->arch.dbsr & ~DBSR_IDE))
270 kvmppc_core_dequeue_debug(vcpu);
143 break; 271 break;
144 case SPRN_TSR: 272 case SPRN_TSR:
145 kvmppc_clr_tsr_bits(vcpu, spr_val); 273 kvmppc_clr_tsr_bits(vcpu, spr_val);
@@ -252,6 +380,10 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
252 emulated = EMULATE_FAIL; 380 emulated = EMULATE_FAIL;
253 } 381 }
254 382
383 if (debug_inst) {
384 current->thread.debug = vcpu->arch.dbg_reg;
385 switch_booke_debug_regs(&vcpu->arch.dbg_reg);
386 }
255 return emulated; 387 return emulated;
256} 388}
257 389
@@ -278,12 +410,43 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
278 case SPRN_CSRR1: 410 case SPRN_CSRR1:
279 *spr_val = vcpu->arch.csrr1; 411 *spr_val = vcpu->arch.csrr1;
280 break; 412 break;
413 case SPRN_DSRR0:
414 *spr_val = vcpu->arch.dsrr0;
415 break;
416 case SPRN_DSRR1:
417 *spr_val = vcpu->arch.dsrr1;
418 break;
419 case SPRN_IAC1:
420 *spr_val = vcpu->arch.dbg_reg.iac1;
421 break;
422 case SPRN_IAC2:
423 *spr_val = vcpu->arch.dbg_reg.iac2;
424 break;
425#if CONFIG_PPC_ADV_DEBUG_IACS > 2
426 case SPRN_IAC3:
427 *spr_val = vcpu->arch.dbg_reg.iac3;
428 break;
429 case SPRN_IAC4:
430 *spr_val = vcpu->arch.dbg_reg.iac4;
431 break;
432#endif
433 case SPRN_DAC1:
434 *spr_val = vcpu->arch.dbg_reg.dac1;
435 break;
436 case SPRN_DAC2:
437 *spr_val = vcpu->arch.dbg_reg.dac2;
438 break;
281 case SPRN_DBCR0: 439 case SPRN_DBCR0:
282 *spr_val = vcpu->arch.dbg_reg.dbcr0; 440 *spr_val = vcpu->arch.dbg_reg.dbcr0;
441 if (vcpu->guest_debug)
442 *spr_val = *spr_val | DBCR0_EDM;
283 break; 443 break;
284 case SPRN_DBCR1: 444 case SPRN_DBCR1:
285 *spr_val = vcpu->arch.dbg_reg.dbcr1; 445 *spr_val = vcpu->arch.dbg_reg.dbcr1;
286 break; 446 break;
447 case SPRN_DBCR2:
448 *spr_val = vcpu->arch.dbg_reg.dbcr2;
449 break;
287 case SPRN_DBSR: 450 case SPRN_DBSR:
288 *spr_val = vcpu->arch.dbsr; 451 *spr_val = vcpu->arch.dbsr;
289 break; 452 break;
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index e9fa56a911fd..81bd8a07aa51 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -238,7 +238,7 @@ kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \
238kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \ 238kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \
239 SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR) 239 SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR)
240kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \ 240kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \
241 SPRN_SRR0, SPRN_SRR1,NEED_ESR 241 SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
242kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \ 242kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \
243 SPRN_SRR0, SPRN_SRR1, 0 243 SPRN_SRR0, SPRN_SRR1, 0
244kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \ 244kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \
@@ -256,11 +256,9 @@ kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
256 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) 256 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
257kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \ 257kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
258 SPRN_SRR0, SPRN_SRR1, 0 258 SPRN_SRR0, SPRN_SRR1, 0
259kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, EX_PARAMS(GEN), \ 259kvm_handler BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, EX_PARAMS(GEN), \
260 SPRN_SRR0, SPRN_SRR1, 0 260 SPRN_SRR0, SPRN_SRR1, 0
261kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, EX_PARAMS(GEN), \ 261kvm_handler BOOKE_INTERRUPT_ALTIVEC_ASSIST, EX_PARAMS(GEN), \
262 SPRN_SRR0, SPRN_SRR1, 0
263kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, EX_PARAMS(GEN), \
264 SPRN_SRR0, SPRN_SRR1, 0 262 SPRN_SRR0, SPRN_SRR1, 0
265kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \ 263kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \
266 SPRN_SRR0, SPRN_SRR1, 0 264 SPRN_SRR0, SPRN_SRR1, 0
@@ -350,7 +348,7 @@ kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
350kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0 348kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
351kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \ 349kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
352 SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR) 350 SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
353kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR 351kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
354kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 352kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
355kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 353kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
356kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 354kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
@@ -361,9 +359,6 @@ kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
361kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \ 359kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
362 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) 360 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
363kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0 361kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
364kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
365kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
366kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
367kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0 362kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
368kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0 363kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
369kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \ 364kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index a326178bdea5..72920bed3ac6 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -22,6 +22,7 @@
22#include <linux/kvm_host.h> 22#include <linux/kvm_host.h>
23#include <asm/mmu-book3e.h> 23#include <asm/mmu-book3e.h>
24#include <asm/tlb.h> 24#include <asm/tlb.h>
25#include <asm/cputhreads.h>
25 26
26enum vcpu_ftr { 27enum vcpu_ftr {
27 VCPU_FTR_MMU_V2 28 VCPU_FTR_MMU_V2
@@ -289,6 +290,25 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
289#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe) 290#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe)
290#define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu) 291#define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu)
291#define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS) 292#define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS)
293
294/*
295 * These functions should be called with preemption disabled,
296 * and the returned value is valid only in that context.
297 */
298static inline int get_thread_specific_lpid(int vm_lpid)
299{
300 int vcpu_lpid = vm_lpid;
301
302 if (threads_per_core == 2)
303 vcpu_lpid |= smp_processor_id() & 1;
304
305 return vcpu_lpid;
306}
307
308static inline int get_lpid(struct kvm_vcpu *vcpu)
309{
310 return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
311}
292#else 312#else
293unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, 313unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
294 struct kvm_book3e_206_tlb_entry *gtlbe); 314 struct kvm_book3e_206_tlb_entry *gtlbe);
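
A worked example of the mapping above, under the assumption that threads_per_core == 2 and that VMs hold the even base lpids set up by the e500mc changes later in this patch:

#include <stdio.h>

/* Standalone stand-in for get_thread_specific_lpid(); cpu plays the
 * role of smp_processor_id(). */
static int thread_lpid(int vm_lpid, int cpu)
{
	return vm_lpid | (cpu & 1);
}

int main(void)
{
	/* A VM owning lpid 4: thread 0 keeps 4, thread 1 runs as 5. */
	printf("%d %d\n", thread_lpid(4, 6), thread_lpid(4, 7));
	return 0;
}
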
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index c99c40e9182a..ce7291c79f6c 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -259,6 +259,7 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
259 break; 259 break;
260 260
261 /* extra exceptions */ 261 /* extra exceptions */
262#ifdef CONFIG_SPE_POSSIBLE
262 case SPRN_IVOR32: 263 case SPRN_IVOR32:
263 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; 264 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
264 break; 265 break;
@@ -268,6 +269,15 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
268 case SPRN_IVOR34: 269 case SPRN_IVOR34:
269 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val; 270 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
270 break; 271 break;
272#endif
273#ifdef CONFIG_ALTIVEC
274 case SPRN_IVOR32:
275 vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
276 break;
277 case SPRN_IVOR33:
278 vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
279 break;
280#endif
271 case SPRN_IVOR35: 281 case SPRN_IVOR35:
272 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; 282 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
273 break; 283 break;
@@ -381,6 +391,7 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v
381 break; 391 break;
382 392
383 /* extra exceptions */ 393 /* extra exceptions */
394#ifdef CONFIG_SPE_POSSIBLE
384 case SPRN_IVOR32: 395 case SPRN_IVOR32:
385 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; 396 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
386 break; 397 break;
@@ -390,6 +401,15 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v
390 case SPRN_IVOR34: 401 case SPRN_IVOR34:
391 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; 402 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
392 break; 403 break;
404#endif
405#ifdef CONFIG_ALTIVEC
406 case SPRN_IVOR32:
407 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
408 break;
409 case SPRN_IVOR33:
410 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
411 break;
412#endif
393 case SPRN_IVOR35: 413 case SPRN_IVOR35:
394 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; 414 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
395 break; 415 break;
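
From the guest's point of view nothing changes syntactically: a Book3E guest still programs IVOR32/IVOR33, and the emulation above simply files the value under the SPE or AltiVec priority slot depending on the core. A guest-side sketch (the SPR number is an assumption taken from asm/reg_booke.h):

#define SPRN_IVOR33 529 /* 0x211; assumed value, per asm/reg_booke.h */

/* Guest-side sketch: install the AltiVec assist handler offset, which
 * the e500mc emulation above routes to IRQPRIO_ALTIVEC_ASSIST. */
static inline void set_ivor33(unsigned long offset)
{
	asm volatile("mtspr 529,%0" : : "r" (offset) : "memory");
}
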
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index b1f3f630315e..769778f855b0 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -69,7 +69,8 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
69 * writing shadow tlb entry to host TLB 69 * writing shadow tlb entry to host TLB
70 */ 70 */
71static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, 71static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
72 uint32_t mas0) 72 uint32_t mas0,
73 uint32_t lpid)
73{ 74{
74 unsigned long flags; 75 unsigned long flags;
75 76
@@ -80,7 +81,7 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
80 mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); 81 mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
81 mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); 82 mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
82#ifdef CONFIG_KVM_BOOKE_HV 83#ifdef CONFIG_KVM_BOOKE_HV
83 mtspr(SPRN_MAS8, stlbe->mas8); 84 mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
84#endif 85#endif
85 asm volatile("isync; tlbwe" : : : "memory"); 86 asm volatile("isync; tlbwe" : : : "memory");
86 87
@@ -129,11 +130,12 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
129 130
130 if (tlbsel == 0) { 131 if (tlbsel == 0) {
131 mas0 = get_host_mas0(stlbe->mas2); 132 mas0 = get_host_mas0(stlbe->mas2);
132 __write_host_tlbe(stlbe, mas0); 133 __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
133 } else { 134 } else {
134 __write_host_tlbe(stlbe, 135 __write_host_tlbe(stlbe,
135 MAS0_TLBSEL(1) | 136 MAS0_TLBSEL(1) |
136 MAS0_ESEL(to_htlb1_esel(sesel))); 137 MAS0_ESEL(to_htlb1_esel(sesel)),
138 vcpu_e500->vcpu.kvm->arch.lpid);
137 } 139 }
138} 140}
139 141
@@ -176,7 +178,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
176 MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; 178 MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
177 magic.mas8 = 0; 179 magic.mas8 = 0;
178 180
179 __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); 181 __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
180 preempt_enable(); 182 preempt_enable();
181} 183}
182#endif 184#endif
@@ -317,10 +319,6 @@ static void kvmppc_e500_setup_stlbe(
317 stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); 319 stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
318 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | 320 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
319 e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); 321 e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
320
321#ifdef CONFIG_KVM_BOOKE_HV
322 stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
323#endif
324} 322}
325 323
326static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, 324static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -633,7 +631,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
633 631
634 local_irq_save(flags); 632 local_irq_save(flags);
635 mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space); 633 mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
636 mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid); 634 mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
637 asm volatile("tlbsx 0, %[geaddr]\n" : : 635 asm volatile("tlbsx 0, %[geaddr]\n" : :
638 [geaddr] "r" (geaddr)); 636 [geaddr] "r" (geaddr));
639 mtspr(SPRN_MAS5, 0); 637 mtspr(SPRN_MAS5, 0);
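
With MAS8 now composed at write time, the guest-space bit and the thread-specific lpid are combined per hardware thread; because the value embeds smp_processor_id(), it is only meaningful while preemption is off, which the existing callers already guarantee. A hedged illustration of the composition on a two-thread core (the MAS8_TGS value is an assumption taken from asm/mmu-book3e.h):

#include <stdint.h>

#define MAS8_TGS 0x80000000u /* guest space; assumed per mmu-book3e.h */

/* Stand-in for the MAS8 value __write_host_tlbe() now builds: lpid is
 * the VM's even base lpid, cpu stands in for smp_processor_id(). */
static uint32_t mas8_for_thread(uint32_t lpid, int cpu)
{
	return MAS8_TGS | (lpid | (cpu & 1));
}
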
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 164bad2a19bf..2fdc8722e324 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -48,10 +48,11 @@ void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
48 return; 48 return;
49 } 49 }
50 50
51 51 preempt_disable();
52 tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id; 52 tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
53 mb(); 53 mb();
54 ppc_msgsnd(dbell_type, 0, tag); 54 ppc_msgsnd(dbell_type, 0, tag);
55 preempt_enable();
55} 56}
56 57
57/* gtlbe must not be mapped by more than one host tlb entry */ 58/* gtlbe must not be mapped by more than one host tlb entry */
@@ -60,12 +61,11 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
60{ 61{
61 unsigned int tid, ts; 62 unsigned int tid, ts;
62 gva_t eaddr; 63 gva_t eaddr;
63 u32 val, lpid; 64 u32 val;
64 unsigned long flags; 65 unsigned long flags;
65 66
66 ts = get_tlb_ts(gtlbe); 67 ts = get_tlb_ts(gtlbe);
67 tid = get_tlb_tid(gtlbe); 68 tid = get_tlb_tid(gtlbe);
68 lpid = vcpu_e500->vcpu.kvm->arch.lpid;
69 69
70 /* We search the host TLB to invalidate its shadow TLB entry */ 70 /* We search the host TLB to invalidate its shadow TLB entry */
71 val = (tid << 16) | ts; 71 val = (tid << 16) | ts;
@@ -74,7 +74,7 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
74 local_irq_save(flags); 74 local_irq_save(flags);
75 75
76 mtspr(SPRN_MAS6, val); 76 mtspr(SPRN_MAS6, val);
77 mtspr(SPRN_MAS5, MAS5_SGS | lpid); 77 mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
78 78
79 asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr)); 79 asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
80 val = mfspr(SPRN_MAS1); 80 val = mfspr(SPRN_MAS1);
@@ -95,7 +95,7 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
95 unsigned long flags; 95 unsigned long flags;
96 96
97 local_irq_save(flags); 97 local_irq_save(flags);
98 mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid); 98 mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
99 asm volatile("tlbilxlpid"); 99 asm volatile("tlbilxlpid");
100 mtspr(SPRN_MAS5, 0); 100 mtspr(SPRN_MAS5, 0);
101 local_irq_restore(flags); 101 local_irq_restore(flags);
@@ -110,6 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
110{ 110{
111} 111}
112 112
113/* We use two lpids per VM */
113static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid); 114static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
114 115
115static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) 116static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
@@ -118,10 +119,12 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
118 119
119 kvmppc_booke_vcpu_load(vcpu, cpu); 120 kvmppc_booke_vcpu_load(vcpu, cpu);
120 121
121 mtspr(SPRN_LPID, vcpu->kvm->arch.lpid); 122 mtspr(SPRN_LPID, get_lpid(vcpu));
122 mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); 123 mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
123 mtspr(SPRN_GPIR, vcpu->vcpu_id); 124 mtspr(SPRN_GPIR, vcpu->vcpu_id);
124 mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp); 125 mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
126 vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
127 vcpu->arch.epsc = vcpu->arch.eplc;
125 mtspr(SPRN_EPLC, vcpu->arch.eplc); 128 mtspr(SPRN_EPLC, vcpu->arch.eplc);
126 mtspr(SPRN_EPSC, vcpu->arch.epsc); 129 mtspr(SPRN_EPSC, vcpu->arch.epsc);
127 130
@@ -141,12 +144,10 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
141 mtspr(SPRN_GESR, vcpu->arch.shared->esr); 144 mtspr(SPRN_GESR, vcpu->arch.shared->esr);
142 145
143 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || 146 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
144 __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) { 147 __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
145 kvmppc_e500_tlbil_all(vcpu_e500); 148 kvmppc_e500_tlbil_all(vcpu_e500);
146 __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu; 149 __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
147 } 150 }
148
149 kvmppc_load_guest_fp(vcpu);
150} 151}
151 152
152static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu) 153static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
@@ -179,6 +180,16 @@ int kvmppc_core_check_processor_compat(void)
179 r = 0; 180 r = 0;
180 else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) 181 else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
181 r = 0; 182 r = 0;
183#ifdef CONFIG_ALTIVEC
184 /*
185 * Since guests have the privilege to enable AltiVec, we need AltiVec
186 * support in the host to save/restore their context.
187 * Don't use CPU_FTR_ALTIVEC to identify cores with an AltiVec unit
188 * because it's cleared in the absence of CONFIG_ALTIVEC!
189 */
190 else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
191 r = 0;
192#endif
182 else 193 else
183 r = -ENOTSUPP; 194 r = -ENOTSUPP;
184 195
@@ -194,9 +205,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
194#ifdef CONFIG_64BIT 205#ifdef CONFIG_64BIT
195 vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM; 206 vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
196#endif 207#endif
197 vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP; 208 vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;
198 vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
199 vcpu->arch.epsc = vcpu->arch.eplc;
200 209
201 vcpu->arch.pvr = mfspr(SPRN_PVR); 210 vcpu->arch.pvr = mfspr(SPRN_PVR);
202 vcpu_e500->svr = mfspr(SPRN_SVR); 211 vcpu_e500->svr = mfspr(SPRN_SVR);
@@ -356,13 +365,26 @@ static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
356 if (lpid < 0) 365 if (lpid < 0)
357 return lpid; 366 return lpid;
358 367
368 /*
369 * Use two lpids per VM on cores with two threads like e6500. Use
370 * even numbers to speed up vcpu lpid computation with consecutive lpids
371 * per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.
372 */
373 if (threads_per_core == 2)
374 lpid <<= 1;
375
359 kvm->arch.lpid = lpid; 376 kvm->arch.lpid = lpid;
360 return 0; 377 return 0;
361} 378}
362 379
363static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm) 380static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
364{ 381{
365 kvmppc_free_lpid(kvm->arch.lpid); 382 int lpid = kvm->arch.lpid;
383
384 if (threads_per_core == 2)
385 lpid >>= 1;
386
387 kvmppc_free_lpid(lpid);
366} 388}
367 389
368static struct kvmppc_ops kvm_ops_e500mc = { 390static struct kvmppc_ops kvm_ops_e500mc = {
@@ -390,7 +412,13 @@ static int __init kvmppc_e500mc_init(void)
390 if (r) 412 if (r)
391 goto err_out; 413 goto err_out;
392 414
393 kvmppc_init_lpid(64); 415 /*
416 * Use two lpids per VM on dual-threaded processors like e6500
417 * to work around the lack of a tlb write conditional instruction.
418 * Expose half the number of available hardware lpids to the lpid
419 * allocator.
420 */
421 kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
394 kvmppc_claim_lpid(0); /* host */ 422 kvmppc_claim_lpid(0); /* host */
395 423
396 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); 424 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
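
Putting the lpid arithmetic together for an e6500 host, assuming KVMPPC_NR_LPIDS is 64: the allocator is seeded with 32 ids, an allocated id n becomes the even hardware lpid 2n, and the two threads run as 2n and 2n+1 (destroy_vm shifts back down before freeing).

#include <stdio.h>

/* Worked example of the dual-thread lpid scheme sketched above. */
int main(void)
{
	int threads_per_core = 2;
	int allocator_ids = 64 / threads_per_core; /* kvmppc_init_lpid() */
	int alloc = 1;                  /* first free id; 0 is the host */
	int vm_lpid = alloc << 1;       /* VM's even hardware lpid: 2   */

	printf("allocator ids: %d\n", allocator_ids);
	printf("VM lpid %d; threads use %d and %d\n",
	       vm_lpid, vm_lpid, vm_lpid | 1);
	return 0;
}
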
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index e96b50d0bdab..5cc2e7af3a7b 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -219,7 +219,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
219 /* this default type might be overwritten by subcategories */ 219 /* this default type might be overwritten by subcategories */
220 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); 220 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
221 221
222 emulated = kvmppc_get_last_inst(vcpu, false, &inst); 222 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
223 if (emulated != EMULATE_DONE) 223 if (emulated != EMULATE_DONE)
224 return emulated; 224 return emulated;
225 225
@@ -274,6 +274,21 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
274 } 274 }
275 break; 275 break;
276 276
277 case 0:
278 /*
279 * Instructions with primary opcode 0. Per the PowerISA,
280 * these are illegal instructions.
281 */
282 if (inst == KVMPPC_INST_SW_BREAKPOINT) {
283 run->exit_reason = KVM_EXIT_DEBUG;
284 run->debug.arch.address = kvmppc_get_pc(vcpu);
285 emulated = EMULATE_EXIT_USER;
286 advance = 0;
287 } else
288 emulated = EMULATE_FAIL;
289
290 break;
291
277 default: 292 default:
278 emulated = EMULATE_FAIL; 293 emulated = EMULATE_FAIL;
279 } 294 }
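
Userspace sees the new software breakpoint as a KVM_EXIT_DEBUG return from KVM_RUN, with the trapping guest address in run->debug.arch.address, exactly as filled in above. A minimal run-loop sketch (run is the mmap'ed kvm_run area, vcpu_fd as before):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: run the vcpu once and report a software-breakpoint hit. */
static int run_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_DEBUG) {
		printf("breakpoint at 0x%llx\n",
		       (unsigned long long)run->debug.arch.address);
		return 1;
	}
	return 0;
}
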
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 0de4ffa175a9..6d3c0ee1d744 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -58,7 +58,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
58 /* this default type might be overwritten by subcategories */ 58 /* this default type might be overwritten by subcategories */
59 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); 59 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
60 60
61 emulated = kvmppc_get_last_inst(vcpu, false, &inst); 61 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
62 if (emulated != EMULATE_DONE) 62 if (emulated != EMULATE_DONE)
63 return emulated; 63 return emulated;
64 64
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index da505237a664..c1f8f53cd312 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -294,7 +294,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
294 { 294 {
295 u32 last_inst; 295 u32 last_inst;
296 296
297 kvmppc_get_last_inst(vcpu, false, &last_inst); 297 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
298 /* XXX Deliver Program interrupt to guest. */ 298 /* XXX Deliver Program interrupt to guest. */
299 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst); 299 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
300 r = RESUME_HOST; 300 r = RESUME_HOST;
@@ -638,7 +638,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
638{ 638{
639 /* Make sure we're not using the vcpu anymore */ 639 /* Make sure we're not using the vcpu anymore */
640 hrtimer_cancel(&vcpu->arch.dec_timer); 640 hrtimer_cancel(&vcpu->arch.dec_timer);
641 tasklet_kill(&vcpu->arch.tasklet);
642 641
643 kvmppc_remove_vcpu_debugfs(vcpu); 642 kvmppc_remove_vcpu_debugfs(vcpu);
644 643
@@ -664,16 +663,12 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
664 return kvmppc_core_pending_dec(vcpu); 663 return kvmppc_core_pending_dec(vcpu);
665} 664}
666 665
667/*
668 * low level hrtimer wake routine. Because this runs in hardirq context
669 * we schedule a tasklet to do the real work.
670 */
671enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) 666enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
672{ 667{
673 struct kvm_vcpu *vcpu; 668 struct kvm_vcpu *vcpu;
674 669
675 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); 670 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
676 tasklet_schedule(&vcpu->arch.tasklet); 671 kvmppc_decrementer_func(vcpu);
677 672
678 return HRTIMER_NORESTART; 673 return HRTIMER_NORESTART;
679} 674}
@@ -683,7 +678,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
683 int ret; 678 int ret;
684 679
685 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 680 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
686 tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
687 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; 681 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
688 vcpu->arch.dec_expires = ~(u64)0; 682 vcpu->arch.dec_expires = ~(u64)0;
689 683
@@ -907,6 +901,103 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
907} 901}
908EXPORT_SYMBOL_GPL(kvmppc_handle_store); 902EXPORT_SYMBOL_GPL(kvmppc_handle_store);
909 903
904int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
905{
906 int r = 0;
907 union kvmppc_one_reg val;
908 int size;
909
910 size = one_reg_size(reg->id);
911 if (size > sizeof(val))
912 return -EINVAL;
913
914 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
915 if (r == -EINVAL) {
916 r = 0;
917 switch (reg->id) {
918#ifdef CONFIG_ALTIVEC
919 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
920 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
921 r = -ENXIO;
922 break;
923 }
924 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
925 break;
926 case KVM_REG_PPC_VSCR:
927 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
928 r = -ENXIO;
929 break;
930 }
931 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
932 break;
933 case KVM_REG_PPC_VRSAVE:
934 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
935 r = -ENXIO;
936 break;
937 }
938 val = get_reg_val(reg->id, vcpu->arch.vrsave);
939 break;
940#endif /* CONFIG_ALTIVEC */
941 default:
942 r = -EINVAL;
943 break;
944 }
945 }
946
947 if (r)
948 return r;
949
950 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
951 r = -EFAULT;
952
953 return r;
954}
955
956int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
957{
958 int r;
959 union kvmppc_one_reg val;
960 int size;
961
962 size = one_reg_size(reg->id);
963 if (size > sizeof(val))
964 return -EINVAL;
965
966 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
967 return -EFAULT;
968
969 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
970 if (r == -EINVAL) {
971 r = 0;
972 switch (reg->id) {
973#ifdef CONFIG_ALTIVEC
974 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
975 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
976 r = -ENXIO;
977 break;
978 }
979 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
980 break;
981 case KVM_REG_PPC_VSCR:
982 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
983 r = -ENXIO;
984 break;
985 }
986 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
987 break;
988 case KVM_REG_PPC_VRSAVE:
989 vcpu->arch.vrsave = set_reg_val(reg->id, val);
990 break;
991#endif /* CONFIG_ALTIVEC */
992 default:
993 r = -EINVAL;
994 break;
995 }
996 }
997
998 return r;
999}
1000
910int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 1001int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
911{ 1002{
912 int r; 1003 int r;
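
These wrappers are the landing point for the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls; anything the arch-specific kvmppc_{get,set}_one_reg() rejects with -EINVAL gets a second chance in the generic AltiVec switch. A minimal userspace sketch writing VRSAVE through this path:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: set the 32-bit VRSAVE register via the one_reg interface. */
static int set_vrsave(int vcpu_fd, uint32_t vrsave)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_VRSAVE,
		.addr = (uintptr_t)&vrsave,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
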
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index e8bc40869cbd..7d9ee3d8c618 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -303,9 +303,13 @@ config PPC_ICSWX_USE_SIGILL
303 303
304 If in doubt, say N here. 304 If in doubt, say N here.
305 305
306config SPE_POSSIBLE
307 def_bool y
308 depends on E200 || (E500 && !PPC_E500MC)
309
306config SPE 310config SPE
307 bool "SPE Support" 311 bool "SPE Support"
308 depends on E200 || (E500 && !PPC_E500MC) 312 depends on SPE_POSSIBLE
309 default y 313 default y
310 ---help--- 314 ---help---
311 This option enables kernel support for the Signal Processing 315 This option enables kernel support for the Signal Processing