Diffstat (limited to 'arch/powerpc/kvm')
30 files changed, 3583 insertions, 1281 deletions
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 7b612a76c701..50e7dbc7356c 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -29,15 +29,18 @@ | |||
29 | #include <asm/kvm_ppc.h> | 29 | #include <asm/kvm_ppc.h> |
30 | 30 | ||
31 | #include "44x_tlb.h" | 31 | #include "44x_tlb.h" |
32 | #include "booke.h" | ||
32 | 33 | ||
33 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 34 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
34 | { | 35 | { |
36 | kvmppc_booke_vcpu_load(vcpu, cpu); | ||
35 | kvmppc_44x_tlb_load(vcpu); | 37 | kvmppc_44x_tlb_load(vcpu); |
36 | } | 38 | } |
37 | 39 | ||
38 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 40 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) |
39 | { | 41 | { |
40 | kvmppc_44x_tlb_put(vcpu); | 42 | kvmppc_44x_tlb_put(vcpu); |
43 | kvmppc_booke_vcpu_put(vcpu); | ||
41 | } | 44 | } |
42 | 45 | ||
43 | int kvmppc_core_check_processor_compat(void) | 46 | int kvmppc_core_check_processor_compat(void) |
@@ -160,6 +163,15 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
160 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | 163 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); |
161 | } | 164 | } |
162 | 165 | ||
166 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
167 | { | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
172 | { | ||
173 | } | ||
174 | |||
163 | static int __init kvmppc_44x_init(void) | 175 | static int __init kvmppc_44x_init(void) |
164 | { | 176 | { |
165 | int r; | 177 | int r; |
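
The empty kvmppc_core_init_vm()/kvmppc_core_destroy_vm() stubs are needed because VM setup and teardown are now routed through per-core hooks; 440 simply has no per-VM state to manage. A minimal sketch of how the generic side is assumed to dispatch to them (the generic caller lives in powerpc.c and is not part of this hunk; exact signatures per that file):

	/* Sketch (assumption): generic arch code delegating VM lifecycle to
	 * the CPU-specific backend, which is why even 44x needs stubs. */
	int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	{
		return kvmppc_core_init_vm(kvm);	/* 44x: nothing to do, returns 0 */
	}

	void kvm_arch_destroy_vm(struct kvm *kvm)
	{
		kvmppc_core_destroy_vm(kvm);		/* 44x: empty */
	}
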
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 549bb2c9a47a..c8c61578fdfc 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
37 | unsigned int inst, int *advance) | 37 | unsigned int inst, int *advance) |
38 | { | 38 | { |
39 | int emulated = EMULATE_DONE; | 39 | int emulated = EMULATE_DONE; |
40 | int dcrn; | 40 | int dcrn = get_dcrn(inst); |
41 | int ra; | 41 | int ra = get_ra(inst); |
42 | int rb; | 42 | int rb = get_rb(inst); |
43 | int rc; | 43 | int rc = get_rc(inst); |
44 | int rs; | 44 | int rs = get_rs(inst); |
45 | int rt; | 45 | int rt = get_rt(inst); |
46 | int ws; | 46 | int ws = get_ws(inst); |
47 | 47 | ||
48 | switch (get_op(inst)) { | 48 | switch (get_op(inst)) { |
49 | case 31: | 49 | case 31: |
50 | switch (get_xop(inst)) { | 50 | switch (get_xop(inst)) { |
51 | 51 | ||
52 | case XOP_MFDCR: | 52 | case XOP_MFDCR: |
53 | dcrn = get_dcrn(inst); | ||
54 | rt = get_rt(inst); | ||
55 | |||
56 | /* The guest may access CPR0 registers to determine the timebase | 53 | /* The guest may access CPR0 registers to determine the timebase |
57 | * frequency, and it must know the real host frequency because it | 54 | * frequency, and it must know the real host frequency because it |
58 | * can directly access the timebase registers. | 55 | * can directly access the timebase registers. |
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
88 | break; | 85 | break; |
89 | 86 | ||
90 | case XOP_MTDCR: | 87 | case XOP_MTDCR: |
91 | dcrn = get_dcrn(inst); | ||
92 | rs = get_rs(inst); | ||
93 | |||
94 | /* emulate some access in kernel */ | 88 | /* emulate some access in kernel */ |
95 | switch (dcrn) { | 89 | switch (dcrn) { |
96 | case DCRN_CPR0_CONFIG_ADDR: | 90 | case DCRN_CPR0_CONFIG_ADDR: |
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
108 | break; | 102 | break; |
109 | 103 | ||
110 | case XOP_TLBWE: | 104 | case XOP_TLBWE: |
111 | ra = get_ra(inst); | ||
112 | rs = get_rs(inst); | ||
113 | ws = get_ws(inst); | ||
114 | emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); | 105 | emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); |
115 | break; | 106 | break; |
116 | 107 | ||
117 | case XOP_TLBSX: | 108 | case XOP_TLBSX: |
118 | rt = get_rt(inst); | ||
119 | ra = get_ra(inst); | ||
120 | rb = get_rb(inst); | ||
121 | rc = get_rc(inst); | ||
122 | emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); | 109 | emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); |
123 | break; | 110 | break; |
124 | 111 | ||
@@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
141 | return emulated; | 128 | return emulated; |
142 | } | 129 | } |
143 | 130 | ||
144 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 131 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
145 | { | 132 | { |
146 | int emulated = EMULATE_DONE; | 133 | int emulated = EMULATE_DONE; |
147 | 134 | ||
148 | switch (sprn) { | 135 | switch (sprn) { |
149 | case SPRN_PID: | 136 | case SPRN_PID: |
150 | kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break; | 137 | kvmppc_set_pid(vcpu, spr_val); break; |
151 | case SPRN_MMUCR: | 138 | case SPRN_MMUCR: |
152 | vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break; | 139 | vcpu->arch.mmucr = spr_val; break; |
153 | case SPRN_CCR0: | 140 | case SPRN_CCR0: |
154 | vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break; | 141 | vcpu->arch.ccr0 = spr_val; break; |
155 | case SPRN_CCR1: | 142 | case SPRN_CCR1: |
156 | vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break; | 143 | vcpu->arch.ccr1 = spr_val; break; |
157 | default: | 144 | default: |
158 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); | 145 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val); |
159 | } | 146 | } |
160 | 147 | ||
161 | return emulated; | 148 | return emulated; |
162 | } | 149 | } |
163 | 150 | ||
164 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 151 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
165 | { | 152 | { |
166 | int emulated = EMULATE_DONE; | 153 | int emulated = EMULATE_DONE; |
167 | 154 | ||
168 | switch (sprn) { | 155 | switch (sprn) { |
169 | case SPRN_PID: | 156 | case SPRN_PID: |
170 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break; | 157 | *spr_val = vcpu->arch.pid; break; |
171 | case SPRN_MMUCR: | 158 | case SPRN_MMUCR: |
172 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break; | 159 | *spr_val = vcpu->arch.mmucr; break; |
173 | case SPRN_CCR0: | 160 | case SPRN_CCR0: |
174 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break; | 161 | *spr_val = vcpu->arch.ccr0; break; |
175 | case SPRN_CCR1: | 162 | case SPRN_CCR1: |
176 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break; | 163 | *spr_val = vcpu->arch.ccr1; break; |
177 | default: | 164 | default: |
178 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); | 165 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val); |
179 | } | 166 | } |
180 | 167 | ||
181 | return emulated; | 168 | return emulated; |
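
Two cleanups meet here: the instruction fields (dcrn, ra, rb, rc, rs, rt, ws) are decoded once at the top of kvmppc_core_emulate_op() instead of inside each case, and the mtspr/mfspr hooks now take the SPR value itself, by value for mtspr and through a pointer for mfspr, rather than a GPR number. A sketch of how the generic dispatcher is presumably expected to drive the new interface (the matching change to emulate.c is not in this excerpt; the helper names below are illustrative):

	/* Sketch: the generic emulation layer does the GPR access on both
	 * sides, so core backends like this one only see the SPR value. */
	static int emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
	{
		ulong spr_val = kvmppc_get_gpr(vcpu, rs);

		return kvmppc_core_emulate_mtspr(vcpu, sprn, spr_val);
	}

	static int emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
	{
		ulong spr_val;
		int emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, rt, spr_val);
		return emulated;
	}
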
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 8f64709ae331..f4dacb9c57fa 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -90,6 +90,9 @@ config KVM_BOOK3S_64_PR | |||
90 | depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV | 90 | depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV |
91 | select KVM_BOOK3S_PR | 91 | select KVM_BOOK3S_PR |
92 | 92 | ||
93 | config KVM_BOOKE_HV | ||
94 | bool | ||
95 | |||
93 | config KVM_440 | 96 | config KVM_440 |
94 | bool "KVM support for PowerPC 440 processors" | 97 | bool "KVM support for PowerPC 440 processors" |
95 | depends on EXPERIMENTAL && 44x | 98 | depends on EXPERIMENTAL && 44x |
@@ -106,7 +109,7 @@ config KVM_440 | |||
106 | 109 | ||
107 | config KVM_EXIT_TIMING | 110 | config KVM_EXIT_TIMING |
108 | bool "Detailed exit timing" | 111 | bool "Detailed exit timing" |
109 | depends on KVM_440 || KVM_E500 | 112 | depends on KVM_440 || KVM_E500V2 || KVM_E500MC |
110 | ---help--- | 113 | ---help--- |
111 | Calculate elapsed time for every exit/enter cycle. A per-vcpu | 114 | Calculate elapsed time for every exit/enter cycle. A per-vcpu |
112 | report is available in debugfs kvm/vm#_vcpu#_timing. | 115 | report is available in debugfs kvm/vm#_vcpu#_timing. |
@@ -115,14 +118,29 @@ config KVM_EXIT_TIMING | |||
115 | 118 | ||
116 | If unsure, say N. | 119 | If unsure, say N. |
117 | 120 | ||
118 | config KVM_E500 | 121 | config KVM_E500V2 |
119 | bool "KVM support for PowerPC E500 processors" | 122 | bool "KVM support for PowerPC E500v2 processors" |
120 | depends on EXPERIMENTAL && E500 | 123 | depends on EXPERIMENTAL && E500 && !PPC_E500MC |
121 | select KVM | 124 | select KVM |
122 | select KVM_MMIO | 125 | select KVM_MMIO |
123 | ---help--- | 126 | ---help--- |
124 | Support running unmodified E500 guest kernels in virtual machines on | 127 | Support running unmodified E500 guest kernels in virtual machines on |
125 | E500 host processors. | 128 | E500v2 host processors. |
129 | |||
130 | This module provides access to the hardware capabilities through | ||
131 | a character device node named /dev/kvm. | ||
132 | |||
133 | If unsure, say N. | ||
134 | |||
135 | config KVM_E500MC | ||
136 | bool "KVM support for PowerPC E500MC/E5500 processors" | ||
137 | depends on EXPERIMENTAL && PPC_E500MC | ||
138 | select KVM | ||
139 | select KVM_MMIO | ||
140 | select KVM_BOOKE_HV | ||
141 | ---help--- | ||
142 | Support running unmodified E500MC/E5500 (32-bit) guest kernels in | ||
143 | virtual machines on E500MC/E5500 host processors. | ||
126 | 144 | ||
127 | This module provides access to the hardware capabilities through | 145 | This module provides access to the hardware capabilities through |
128 | a character device node named /dev/kvm. | 146 | a character device node named /dev/kvm. |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 3688aeecc4b2..c2a08636e6d4 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -36,7 +36,17 @@ kvm-e500-objs := \ | |||
36 | e500.o \ | 36 | e500.o \ |
37 | e500_tlb.o \ | 37 | e500_tlb.o \ |
38 | e500_emulate.o | 38 | e500_emulate.o |
39 | kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs) | 39 | kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs) |
40 | |||
41 | kvm-e500mc-objs := \ | ||
42 | $(common-objs-y) \ | ||
43 | booke.o \ | ||
44 | booke_emulate.o \ | ||
45 | bookehv_interrupts.o \ | ||
46 | e500mc.o \ | ||
47 | e500_tlb.o \ | ||
48 | e500_emulate.o | ||
49 | kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) | ||
40 | 50 | ||
41 | kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ | 51 | kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ |
42 | ../../../virt/kvm/coalesced_mmio.o \ | 52 | ../../../virt/kvm/coalesced_mmio.o \ |
@@ -44,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ | |||
44 | book3s_paired_singles.o \ | 54 | book3s_paired_singles.o \ |
45 | book3s_pr.o \ | 55 | book3s_pr.o \ |
46 | book3s_pr_papr.o \ | 56 | book3s_pr_papr.o \ |
57 | book3s_64_vio_hv.o \ | ||
47 | book3s_emulate.o \ | 58 | book3s_emulate.o \ |
48 | book3s_interrupts.o \ | 59 | book3s_interrupts.o \ |
49 | book3s_mmu_hpte.o \ | 60 | book3s_mmu_hpte.o \ |
@@ -68,6 +79,7 @@ kvm-book3s_64-module-objs := \ | |||
68 | powerpc.o \ | 79 | powerpc.o \ |
69 | emulate.o \ | 80 | emulate.o \ |
70 | book3s.o \ | 81 | book3s.o \ |
82 | book3s_64_vio.o \ | ||
71 | $(kvm-book3s_64-objs-y) | 83 | $(kvm-book3s_64-objs-y) |
72 | 84 | ||
73 | kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs) | 85 | kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs) |
@@ -88,7 +100,8 @@ kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs) | |||
88 | kvm-objs := $(kvm-objs-m) $(kvm-objs-y) | 100 | kvm-objs := $(kvm-objs-m) $(kvm-objs-y) |
89 | 101 | ||
90 | obj-$(CONFIG_KVM_440) += kvm.o | 102 | obj-$(CONFIG_KVM_440) += kvm.o |
91 | obj-$(CONFIG_KVM_E500) += kvm.o | 103 | obj-$(CONFIG_KVM_E500V2) += kvm.o |
104 | obj-$(CONFIG_KVM_E500MC) += kvm.o | ||
92 | obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o | 105 | obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o |
93 | obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o | 106 | obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o |
94 | 107 | ||
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 7d54f4ed6d96..3f2a8360c857 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) | |||
258 | return true; | 258 | return true; |
259 | } | 259 | } |
260 | 260 | ||
261 | void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | 261 | int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) |
262 | { | 262 | { |
263 | unsigned long *pending = &vcpu->arch.pending_exceptions; | 263 | unsigned long *pending = &vcpu->arch.pending_exceptions; |
264 | unsigned long old_pending = vcpu->arch.pending_exceptions; | 264 | unsigned long old_pending = vcpu->arch.pending_exceptions; |
@@ -283,12 +283,17 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | |||
283 | 283 | ||
284 | /* Tell the guest about our interrupt status */ | 284 | /* Tell the guest about our interrupt status */ |
285 | kvmppc_update_int_pending(vcpu, *pending, old_pending); | 285 | kvmppc_update_int_pending(vcpu, *pending, old_pending); |
286 | |||
287 | return 0; | ||
286 | } | 288 | } |
287 | 289 | ||
288 | pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) | 290 | pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
289 | { | 291 | { |
290 | ulong mp_pa = vcpu->arch.magic_page_pa; | 292 | ulong mp_pa = vcpu->arch.magic_page_pa; |
291 | 293 | ||
294 | if (!(vcpu->arch.shared->msr & MSR_SF)) | ||
295 | mp_pa = (uint32_t)mp_pa; | ||
296 | |||
292 | /* Magic page override */ | 297 | /* Magic page override */ |
293 | if (unlikely(mp_pa) && | 298 | if (unlikely(mp_pa) && |
294 | unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == | 299 | unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == |
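
kvmppc_core_prepare_to_enter() now returns an int (always 0 here) so other backends can ask the shared entry path not to enter the guest, and the magic-page check masks the stored address to 32 bits when the guest runs with MSR_SF clear. The point of the masking: a 32-bit guest registers its magic page just below 4GB (e.g. 0xfffff000), and the 64-bit field may carry upper bits, so only the low 32 bits are meaningful for the gfn comparison. A condensed sketch of the complete check with the new masking applied (the wrapper name is illustrative, not from the patch):

	/* Sketch: magic page detection for a gfn, honouring 32-bit guests. */
	static bool gfn_is_magic_page(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		ulong mp_pa = vcpu->arch.magic_page_pa;

		if (!(vcpu->arch.shared->msr & MSR_SF))
			mp_pa = (uint32_t)mp_pa;	/* 32-bit guest: ignore upper bits */

		return mp_pa &&
		       ((gfn << PAGE_SHIFT) & KVM_PAM) == (mp_pa & KVM_PAM & PAGE_MASK);
	}
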
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c3beaeef3f60..80a577517584 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -36,13 +36,11 @@ | |||
36 | 36 | ||
37 | /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ | 37 | /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ |
38 | #define MAX_LPID_970 63 | 38 | #define MAX_LPID_970 63 |
39 | #define NR_LPIDS (LPID_RSVD + 1) | ||
40 | unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)]; | ||
41 | 39 | ||
42 | long kvmppc_alloc_hpt(struct kvm *kvm) | 40 | long kvmppc_alloc_hpt(struct kvm *kvm) |
43 | { | 41 | { |
44 | unsigned long hpt; | 42 | unsigned long hpt; |
45 | unsigned long lpid; | 43 | long lpid; |
46 | struct revmap_entry *rev; | 44 | struct revmap_entry *rev; |
47 | struct kvmppc_linear_info *li; | 45 | struct kvmppc_linear_info *li; |
48 | 46 | ||
@@ -72,14 +70,9 @@ long kvmppc_alloc_hpt(struct kvm *kvm) | |||
72 | } | 70 | } |
73 | kvm->arch.revmap = rev; | 71 | kvm->arch.revmap = rev; |
74 | 72 | ||
75 | /* Allocate the guest's logical partition ID */ | 73 | lpid = kvmppc_alloc_lpid(); |
76 | do { | 74 | if (lpid < 0) |
77 | lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS); | 75 | goto out_freeboth; |
78 | if (lpid >= NR_LPIDS) { | ||
79 | pr_err("kvm_alloc_hpt: No LPIDs free\n"); | ||
80 | goto out_freeboth; | ||
81 | } | ||
82 | } while (test_and_set_bit(lpid, lpid_inuse)); | ||
83 | 76 | ||
84 | kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18); | 77 | kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18); |
85 | kvm->arch.lpid = lpid; | 78 | kvm->arch.lpid = lpid; |
@@ -96,7 +89,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm) | |||
96 | 89 | ||
97 | void kvmppc_free_hpt(struct kvm *kvm) | 90 | void kvmppc_free_hpt(struct kvm *kvm) |
98 | { | 91 | { |
99 | clear_bit(kvm->arch.lpid, lpid_inuse); | 92 | kvmppc_free_lpid(kvm->arch.lpid); |
100 | vfree(kvm->arch.revmap); | 93 | vfree(kvm->arch.revmap); |
101 | if (kvm->arch.hpt_li) | 94 | if (kvm->arch.hpt_li) |
102 | kvm_release_hpt(kvm->arch.hpt_li); | 95 | kvm_release_hpt(kvm->arch.hpt_li); |
@@ -171,8 +164,7 @@ int kvmppc_mmu_hv_init(void) | |||
171 | if (!cpu_has_feature(CPU_FTR_HVMODE)) | 164 | if (!cpu_has_feature(CPU_FTR_HVMODE)) |
172 | return -EINVAL; | 165 | return -EINVAL; |
173 | 166 | ||
174 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); | 167 | /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */ |
175 | |||
176 | if (cpu_has_feature(CPU_FTR_ARCH_206)) { | 168 | if (cpu_has_feature(CPU_FTR_ARCH_206)) { |
177 | host_lpid = mfspr(SPRN_LPID); /* POWER7 */ | 169 | host_lpid = mfspr(SPRN_LPID); /* POWER7 */ |
178 | rsvd_lpid = LPID_RSVD; | 170 | rsvd_lpid = LPID_RSVD; |
@@ -181,9 +173,11 @@ int kvmppc_mmu_hv_init(void) | |||
181 | rsvd_lpid = MAX_LPID_970; | 173 | rsvd_lpid = MAX_LPID_970; |
182 | } | 174 | } |
183 | 175 | ||
184 | set_bit(host_lpid, lpid_inuse); | 176 | kvmppc_init_lpid(rsvd_lpid + 1); |
177 | |||
178 | kvmppc_claim_lpid(host_lpid); | ||
185 | /* rsvd_lpid is reserved for use in partition switching */ | 179 | /* rsvd_lpid is reserved for use in partition switching */ |
186 | set_bit(rsvd_lpid, lpid_inuse); | 180 | kvmppc_claim_lpid(rsvd_lpid); |
187 | 181 | ||
188 | return 0; | 182 | return 0; |
189 | } | 183 | } |
@@ -452,7 +446,7 @@ static int instruction_is_store(unsigned int instr) | |||
452 | } | 446 | } |
453 | 447 | ||
454 | static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, | 448 | static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, |
455 | unsigned long gpa, int is_store) | 449 | unsigned long gpa, gva_t ea, int is_store) |
456 | { | 450 | { |
457 | int ret; | 451 | int ret; |
458 | u32 last_inst; | 452 | u32 last_inst; |
@@ -499,6 +493,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
499 | */ | 493 | */ |
500 | 494 | ||
501 | vcpu->arch.paddr_accessed = gpa; | 495 | vcpu->arch.paddr_accessed = gpa; |
496 | vcpu->arch.vaddr_accessed = ea; | ||
502 | return kvmppc_emulate_mmio(run, vcpu); | 497 | return kvmppc_emulate_mmio(run, vcpu); |
503 | } | 498 | } |
504 | 499 | ||
@@ -552,7 +547,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
552 | /* No memslot means it's an emulated MMIO region */ | 547 | /* No memslot means it's an emulated MMIO region */ |
553 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { | 548 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { |
554 | unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1)); | 549 | unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1)); |
555 | return kvmppc_hv_emulate_mmio(run, vcpu, gpa, | 550 | return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, |
556 | dsisr & DSISR_ISSTORE); | 551 | dsisr & DSISR_ISSTORE); |
557 | } | 552 | } |
558 | 553 | ||
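
The per-arch lpid_inuse bitmap is gone: LPID management moves to shared helpers (kvmppc_init_lpid(), kvmppc_claim_lpid(), kvmppc_alloc_lpid(), kvmppc_free_lpid()) so Book3S HV and the new e500mc Booke HV code can use the same allocator. Their implementation lives in the generic code and is not part of this excerpt; it presumably keeps the bitmap semantics of the lines removed here, roughly as below (the KVMPPC_NR_LPIDS bound is assumed; the real version may differ in detail):

	/* Sketch of the shared LPID allocator, mirroring the removed
	 * open-coded logic. */
	static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
	static unsigned long nr_lpids;

	long kvmppc_alloc_lpid(void)
	{
		long lpid;

		do {
			lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
			if (lpid >= nr_lpids) {
				pr_err("%s: No LPIDs free\n", __func__);
				return -ENOMEM;
			}
		} while (test_and_set_bit(lpid, lpid_inuse));

		return lpid;
	}

	void kvmppc_claim_lpid(long lpid)
	{
		set_bit(lpid, lpid_inuse);	/* e.g. the host's own LPID */
	}

	void kvmppc_free_lpid(long lpid)
	{
		clear_bit(lpid, lpid_inuse);
	}

	void kvmppc_init_lpid(unsigned long nr_lpids_param)
	{
		nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
		memset(lpid_inuse, 0, sizeof(lpid_inuse));
	}
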
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index f2e6e48ea463..56b983e7b738 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -90,8 +90,6 @@ slb_exit_skip_ ## num: | |||
90 | or r10, r10, r12 | 90 | or r10, r10, r12 |
91 | slbie r10 | 91 | slbie r10 |
92 | 92 | ||
93 | isync | ||
94 | |||
95 | /* Fill SLB with our shadow */ | 93 | /* Fill SLB with our shadow */ |
96 | 94 | ||
97 | lbz r12, SVCPU_SLB_MAX(r3) | 95 | lbz r12, SVCPU_SLB_MAX(r3) |
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
new file mode 100644
index 000000000000..72ffc899c082
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
16 | * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com> | ||
17 | */ | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/kvm.h> | ||
22 | #include <linux/kvm_host.h> | ||
23 | #include <linux/highmem.h> | ||
24 | #include <linux/gfp.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/hugetlb.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <linux/anon_inodes.h> | ||
29 | |||
30 | #include <asm/tlbflush.h> | ||
31 | #include <asm/kvm_ppc.h> | ||
32 | #include <asm/kvm_book3s.h> | ||
33 | #include <asm/mmu-hash64.h> | ||
34 | #include <asm/hvcall.h> | ||
35 | #include <asm/synch.h> | ||
36 | #include <asm/ppc-opcode.h> | ||
37 | #include <asm/kvm_host.h> | ||
38 | #include <asm/udbg.h> | ||
39 | |||
40 | #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) | ||
41 | |||
42 | static long kvmppc_stt_npages(unsigned long window_size) | ||
43 | { | ||
44 | return ALIGN((window_size >> SPAPR_TCE_SHIFT) | ||
45 | * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; | ||
46 | } | ||
47 | |||
48 | static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt) | ||
49 | { | ||
50 | struct kvm *kvm = stt->kvm; | ||
51 | int i; | ||
52 | |||
53 | mutex_lock(&kvm->lock); | ||
54 | list_del(&stt->list); | ||
55 | for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++) | ||
56 | __free_page(stt->pages[i]); | ||
57 | kfree(stt); | ||
58 | mutex_unlock(&kvm->lock); | ||
59 | |||
60 | kvm_put_kvm(kvm); | ||
61 | } | ||
62 | |||
63 | static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
64 | { | ||
65 | struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data; | ||
66 | struct page *page; | ||
67 | |||
68 | if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size)) | ||
69 | return VM_FAULT_SIGBUS; | ||
70 | |||
71 | page = stt->pages[vmf->pgoff]; | ||
72 | get_page(page); | ||
73 | vmf->page = page; | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static const struct vm_operations_struct kvm_spapr_tce_vm_ops = { | ||
78 | .fault = kvm_spapr_tce_fault, | ||
79 | }; | ||
80 | |||
81 | static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma) | ||
82 | { | ||
83 | vma->vm_ops = &kvm_spapr_tce_vm_ops; | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) | ||
88 | { | ||
89 | struct kvmppc_spapr_tce_table *stt = filp->private_data; | ||
90 | |||
91 | release_spapr_tce_table(stt); | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static struct file_operations kvm_spapr_tce_fops = { | ||
96 | .mmap = kvm_spapr_tce_mmap, | ||
97 | .release = kvm_spapr_tce_release, | ||
98 | }; | ||
99 | |||
100 | long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | ||
101 | struct kvm_create_spapr_tce *args) | ||
102 | { | ||
103 | struct kvmppc_spapr_tce_table *stt = NULL; | ||
104 | long npages; | ||
105 | int ret = -ENOMEM; | ||
106 | int i; | ||
107 | |||
108 | /* Check this LIOBN hasn't been previously allocated */ | ||
109 | list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { | ||
110 | if (stt->liobn == args->liobn) | ||
111 | return -EBUSY; | ||
112 | } | ||
113 | |||
114 | npages = kvmppc_stt_npages(args->window_size); | ||
115 | |||
116 | stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), | ||
117 | GFP_KERNEL); | ||
118 | if (!stt) | ||
119 | goto fail; | ||
120 | |||
121 | stt->liobn = args->liobn; | ||
122 | stt->window_size = args->window_size; | ||
123 | stt->kvm = kvm; | ||
124 | |||
125 | for (i = 0; i < npages; i++) { | ||
126 | stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
127 | if (!stt->pages[i]) | ||
128 | goto fail; | ||
129 | } | ||
130 | |||
131 | kvm_get_kvm(kvm); | ||
132 | |||
133 | mutex_lock(&kvm->lock); | ||
134 | list_add(&stt->list, &kvm->arch.spapr_tce_tables); | ||
135 | |||
136 | mutex_unlock(&kvm->lock); | ||
137 | |||
138 | return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, | ||
139 | stt, O_RDWR); | ||
140 | |||
141 | fail: | ||
142 | if (stt) { | ||
143 | for (i = 0; i < npages; i++) | ||
144 | if (stt->pages[i]) | ||
145 | __free_page(stt->pages[i]); | ||
146 | |||
147 | kfree(stt); | ||
148 | } | ||
149 | return ret; | ||
150 | } | ||
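
This new file carries the KVM_CREATE_SPAPR_TCE VM ioctl: it allocates a zeroed TCE table for the given LIOBN, keeps it on kvm->arch.spapr_tce_tables, and returns a file descriptor whose pages userspace can mmap to see the table the guest fills via H_PUT_TCE. A minimal userspace sketch of the intended calling sequence (error handling elided; the LIOBN and window size are placeholders):

	/* Userspace sketch: create a TCE table for a 256MB DMA window and map it. */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>

	int create_and_map_tce(int vm_fd)
	{
		struct kvm_create_spapr_tce args = {
			.liobn		= 0x80000000,	/* placeholder LIOBN */
			.window_size	= 256 << 20,	/* 256MB DMA window */
		};
		int tce_fd = ioctl(vm_fd, KVM_CREATE_SPAPR_TCE, &args);

		/* one 64-bit TCE per 4KB IOMMU page of the window */
		size_t table_bytes = (args.window_size >> 12) * sizeof(uint64_t);
		uint64_t *table = mmap(NULL, table_bytes, PROT_READ | PROT_WRITE,
				       MAP_SHARED, tce_fd, 0);

		/* entries written by the guest's H_PUT_TCE hcalls (handled in
		 * book3s_64_vio_hv.c) are now visible through this mapping */
		(void)table;
		return tce_fd;
	}
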
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index ea0f8c537c28..30c2f3b134c6 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -38,6 +38,9 @@ | |||
38 | 38 | ||
39 | #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) | 39 | #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) |
40 | 40 | ||
41 | /* WARNING: This will be called in real-mode on HV KVM and virtual | ||
42 | * mode on PR KVM | ||
43 | */ | ||
41 | long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | 44 | long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, |
42 | unsigned long ioba, unsigned long tce) | 45 | unsigned long ioba, unsigned long tce) |
43 | { | 46 | { |
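
The new comment is the important part: on HV this hcall is reached straight from the guest in real mode (MMU off), while on PR it runs in the normal virtual-mode path, so the handler may only touch data it can reach without faulting and must not sleep or allocate. In practice that limits it to walking the pre-allocated table list and poking the pinned table pages, roughly as the existing handler body does (a simplified sketch; the full function is not shown in this excerpt):

	/* Sketch of the existing real-mode-safe H_PUT_TCE flow (simplified). */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == liobn) {
			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
			u64 *tbl;

			if (ioba >= stt->window_size)
				return H_PARAMETER;

			tbl = page_address(stt->pages[idx / TCES_PER_PAGE]);
			tbl[idx % TCES_PER_PAGE] = tce;	/* no allocation, no sleeping */
			return H_SUCCESS;
		}
	}
	return H_TOO_HARD;	/* unknown LIOBN: punt to the virtual-mode/userspace path */
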
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 135663a3e4fc..b9a989dc76cc 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
87 | unsigned int inst, int *advance) | 87 | unsigned int inst, int *advance) |
88 | { | 88 | { |
89 | int emulated = EMULATE_DONE; | 89 | int emulated = EMULATE_DONE; |
90 | int rt = get_rt(inst); | ||
91 | int rs = get_rs(inst); | ||
92 | int ra = get_ra(inst); | ||
93 | int rb = get_rb(inst); | ||
90 | 94 | ||
91 | switch (get_op(inst)) { | 95 | switch (get_op(inst)) { |
92 | case 19: | 96 | case 19: |
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
106 | case 31: | 110 | case 31: |
107 | switch (get_xop(inst)) { | 111 | switch (get_xop(inst)) { |
108 | case OP_31_XOP_MFMSR: | 112 | case OP_31_XOP_MFMSR: |
109 | kvmppc_set_gpr(vcpu, get_rt(inst), | 113 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); |
110 | vcpu->arch.shared->msr); | ||
111 | break; | 114 | break; |
112 | case OP_31_XOP_MTMSRD: | 115 | case OP_31_XOP_MTMSRD: |
113 | { | 116 | { |
114 | ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); | 117 | ulong rs_val = kvmppc_get_gpr(vcpu, rs); |
115 | if (inst & 0x10000) { | 118 | if (inst & 0x10000) { |
116 | vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE); | 119 | ulong new_msr = vcpu->arch.shared->msr; |
117 | vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE); | 120 | new_msr &= ~(MSR_RI | MSR_EE); |
121 | new_msr |= rs_val & (MSR_RI | MSR_EE); | ||
122 | vcpu->arch.shared->msr = new_msr; | ||
118 | } else | 123 | } else |
119 | kvmppc_set_msr(vcpu, rs); | 124 | kvmppc_set_msr(vcpu, rs_val); |
120 | break; | 125 | break; |
121 | } | 126 | } |
122 | case OP_31_XOP_MTMSR: | 127 | case OP_31_XOP_MTMSR: |
123 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst))); | 128 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); |
124 | break; | 129 | break; |
125 | case OP_31_XOP_MFSR: | 130 | case OP_31_XOP_MFSR: |
126 | { | 131 | { |
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
130 | if (vcpu->arch.mmu.mfsrin) { | 135 | if (vcpu->arch.mmu.mfsrin) { |
131 | u32 sr; | 136 | u32 sr; |
132 | sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); | 137 | sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); |
133 | kvmppc_set_gpr(vcpu, get_rt(inst), sr); | 138 | kvmppc_set_gpr(vcpu, rt, sr); |
134 | } | 139 | } |
135 | break; | 140 | break; |
136 | } | 141 | } |
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
138 | { | 143 | { |
139 | int srnum; | 144 | int srnum; |
140 | 145 | ||
141 | srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf; | 146 | srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf; |
142 | if (vcpu->arch.mmu.mfsrin) { | 147 | if (vcpu->arch.mmu.mfsrin) { |
143 | u32 sr; | 148 | u32 sr; |
144 | sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); | 149 | sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); |
145 | kvmppc_set_gpr(vcpu, get_rt(inst), sr); | 150 | kvmppc_set_gpr(vcpu, rt, sr); |
146 | } | 151 | } |
147 | break; | 152 | break; |
148 | } | 153 | } |
149 | case OP_31_XOP_MTSR: | 154 | case OP_31_XOP_MTSR: |
150 | vcpu->arch.mmu.mtsrin(vcpu, | 155 | vcpu->arch.mmu.mtsrin(vcpu, |
151 | (inst >> 16) & 0xf, | 156 | (inst >> 16) & 0xf, |
152 | kvmppc_get_gpr(vcpu, get_rs(inst))); | 157 | kvmppc_get_gpr(vcpu, rs)); |
153 | break; | 158 | break; |
154 | case OP_31_XOP_MTSRIN: | 159 | case OP_31_XOP_MTSRIN: |
155 | vcpu->arch.mmu.mtsrin(vcpu, | 160 | vcpu->arch.mmu.mtsrin(vcpu, |
156 | (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf, | 161 | (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf, |
157 | kvmppc_get_gpr(vcpu, get_rs(inst))); | 162 | kvmppc_get_gpr(vcpu, rs)); |
158 | break; | 163 | break; |
159 | case OP_31_XOP_TLBIE: | 164 | case OP_31_XOP_TLBIE: |
160 | case OP_31_XOP_TLBIEL: | 165 | case OP_31_XOP_TLBIEL: |
161 | { | 166 | { |
162 | bool large = (inst & 0x00200000) ? true : false; | 167 | bool large = (inst & 0x00200000) ? true : false; |
163 | ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst)); | 168 | ulong addr = kvmppc_get_gpr(vcpu, rb); |
164 | vcpu->arch.mmu.tlbie(vcpu, addr, large); | 169 | vcpu->arch.mmu.tlbie(vcpu, addr, large); |
165 | break; | 170 | break; |
166 | } | 171 | } |
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
171 | return EMULATE_FAIL; | 176 | return EMULATE_FAIL; |
172 | 177 | ||
173 | vcpu->arch.mmu.slbmte(vcpu, | 178 | vcpu->arch.mmu.slbmte(vcpu, |
174 | kvmppc_get_gpr(vcpu, get_rs(inst)), | 179 | kvmppc_get_gpr(vcpu, rs), |
175 | kvmppc_get_gpr(vcpu, get_rb(inst))); | 180 | kvmppc_get_gpr(vcpu, rb)); |
176 | break; | 181 | break; |
177 | case OP_31_XOP_SLBIE: | 182 | case OP_31_XOP_SLBIE: |
178 | if (!vcpu->arch.mmu.slbie) | 183 | if (!vcpu->arch.mmu.slbie) |
179 | return EMULATE_FAIL; | 184 | return EMULATE_FAIL; |
180 | 185 | ||
181 | vcpu->arch.mmu.slbie(vcpu, | 186 | vcpu->arch.mmu.slbie(vcpu, |
182 | kvmppc_get_gpr(vcpu, get_rb(inst))); | 187 | kvmppc_get_gpr(vcpu, rb)); |
183 | break; | 188 | break; |
184 | case OP_31_XOP_SLBIA: | 189 | case OP_31_XOP_SLBIA: |
185 | if (!vcpu->arch.mmu.slbia) | 190 | if (!vcpu->arch.mmu.slbia) |
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
191 | if (!vcpu->arch.mmu.slbmfee) { | 196 | if (!vcpu->arch.mmu.slbmfee) { |
192 | emulated = EMULATE_FAIL; | 197 | emulated = EMULATE_FAIL; |
193 | } else { | 198 | } else { |
194 | ulong t, rb; | 199 | ulong t, rb_val; |
195 | 200 | ||
196 | rb = kvmppc_get_gpr(vcpu, get_rb(inst)); | 201 | rb_val = kvmppc_get_gpr(vcpu, rb); |
197 | t = vcpu->arch.mmu.slbmfee(vcpu, rb); | 202 | t = vcpu->arch.mmu.slbmfee(vcpu, rb_val); |
198 | kvmppc_set_gpr(vcpu, get_rt(inst), t); | 203 | kvmppc_set_gpr(vcpu, rt, t); |
199 | } | 204 | } |
200 | break; | 205 | break; |
201 | case OP_31_XOP_SLBMFEV: | 206 | case OP_31_XOP_SLBMFEV: |
202 | if (!vcpu->arch.mmu.slbmfev) { | 207 | if (!vcpu->arch.mmu.slbmfev) { |
203 | emulated = EMULATE_FAIL; | 208 | emulated = EMULATE_FAIL; |
204 | } else { | 209 | } else { |
205 | ulong t, rb; | 210 | ulong t, rb_val; |
206 | 211 | ||
207 | rb = kvmppc_get_gpr(vcpu, get_rb(inst)); | 212 | rb_val = kvmppc_get_gpr(vcpu, rb); |
208 | t = vcpu->arch.mmu.slbmfev(vcpu, rb); | 213 | t = vcpu->arch.mmu.slbmfev(vcpu, rb_val); |
209 | kvmppc_set_gpr(vcpu, get_rt(inst), t); | 214 | kvmppc_set_gpr(vcpu, rt, t); |
210 | } | 215 | } |
211 | break; | 216 | break; |
212 | case OP_31_XOP_DCBA: | 217 | case OP_31_XOP_DCBA: |
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
214 | break; | 219 | break; |
215 | case OP_31_XOP_DCBZ: | 220 | case OP_31_XOP_DCBZ: |
216 | { | 221 | { |
217 | ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); | 222 | ulong rb_val = kvmppc_get_gpr(vcpu, rb); |
218 | ulong ra = 0; | 223 | ulong ra_val = 0; |
219 | ulong addr, vaddr; | 224 | ulong addr, vaddr; |
220 | u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; | 225 | u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; |
221 | u32 dsisr; | 226 | u32 dsisr; |
222 | int r; | 227 | int r; |
223 | 228 | ||
224 | if (get_ra(inst)) | 229 | if (ra) |
225 | ra = kvmppc_get_gpr(vcpu, get_ra(inst)); | 230 | ra_val = kvmppc_get_gpr(vcpu, ra); |
226 | 231 | ||
227 | addr = (ra + rb) & ~31ULL; | 232 | addr = (ra_val + rb_val) & ~31ULL; |
228 | if (!(vcpu->arch.shared->msr & MSR_SF)) | 233 | if (!(vcpu->arch.shared->msr & MSR_SF)) |
229 | addr &= 0xffffffff; | 234 | addr &= 0xffffffff; |
230 | vaddr = addr; | 235 | vaddr = addr; |
@@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn) | |||
313 | return bat; | 318 | return bat; |
314 | } | 319 | } |
315 | 320 | ||
316 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 321 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
317 | { | 322 | { |
318 | int emulated = EMULATE_DONE; | 323 | int emulated = EMULATE_DONE; |
319 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
320 | 324 | ||
321 | switch (sprn) { | 325 | switch (sprn) { |
322 | case SPRN_SDR1: | 326 | case SPRN_SDR1: |
@@ -428,7 +432,7 @@ unprivileged: | |||
428 | return emulated; | 432 | return emulated; |
429 | } | 433 | } |
430 | 434 | ||
431 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 435 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
432 | { | 436 | { |
433 | int emulated = EMULATE_DONE; | 437 | int emulated = EMULATE_DONE; |
434 | 438 | ||
@@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
441 | struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); | 445 | struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); |
442 | 446 | ||
443 | if (sprn % 2) | 447 | if (sprn % 2) |
444 | kvmppc_set_gpr(vcpu, rt, bat->raw >> 32); | 448 | *spr_val = bat->raw >> 32; |
445 | else | 449 | else |
446 | kvmppc_set_gpr(vcpu, rt, bat->raw); | 450 | *spr_val = bat->raw; |
447 | 451 | ||
448 | break; | 452 | break; |
449 | } | 453 | } |
450 | case SPRN_SDR1: | 454 | case SPRN_SDR1: |
451 | if (!spr_allowed(vcpu, PRIV_HYPER)) | 455 | if (!spr_allowed(vcpu, PRIV_HYPER)) |
452 | goto unprivileged; | 456 | goto unprivileged; |
453 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); | 457 | *spr_val = to_book3s(vcpu)->sdr1; |
454 | break; | 458 | break; |
455 | case SPRN_DSISR: | 459 | case SPRN_DSISR: |
456 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr); | 460 | *spr_val = vcpu->arch.shared->dsisr; |
457 | break; | 461 | break; |
458 | case SPRN_DAR: | 462 | case SPRN_DAR: |
459 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); | 463 | *spr_val = vcpu->arch.shared->dar; |
460 | break; | 464 | break; |
461 | case SPRN_HIOR: | 465 | case SPRN_HIOR: |
462 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); | 466 | *spr_val = to_book3s(vcpu)->hior; |
463 | break; | 467 | break; |
464 | case SPRN_HID0: | 468 | case SPRN_HID0: |
465 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]); | 469 | *spr_val = to_book3s(vcpu)->hid[0]; |
466 | break; | 470 | break; |
467 | case SPRN_HID1: | 471 | case SPRN_HID1: |
468 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]); | 472 | *spr_val = to_book3s(vcpu)->hid[1]; |
469 | break; | 473 | break; |
470 | case SPRN_HID2: | 474 | case SPRN_HID2: |
471 | case SPRN_HID2_GEKKO: | 475 | case SPRN_HID2_GEKKO: |
472 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]); | 476 | *spr_val = to_book3s(vcpu)->hid[2]; |
473 | break; | 477 | break; |
474 | case SPRN_HID4: | 478 | case SPRN_HID4: |
475 | case SPRN_HID4_GEKKO: | 479 | case SPRN_HID4_GEKKO: |
476 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]); | 480 | *spr_val = to_book3s(vcpu)->hid[4]; |
477 | break; | 481 | break; |
478 | case SPRN_HID5: | 482 | case SPRN_HID5: |
479 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]); | 483 | *spr_val = to_book3s(vcpu)->hid[5]; |
480 | break; | 484 | break; |
481 | case SPRN_CFAR: | 485 | case SPRN_CFAR: |
482 | case SPRN_PURR: | 486 | case SPRN_PURR: |
483 | kvmppc_set_gpr(vcpu, rt, 0); | 487 | *spr_val = 0; |
484 | break; | 488 | break; |
485 | case SPRN_GQR0: | 489 | case SPRN_GQR0: |
486 | case SPRN_GQR1: | 490 | case SPRN_GQR1: |
@@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
490 | case SPRN_GQR5: | 494 | case SPRN_GQR5: |
491 | case SPRN_GQR6: | 495 | case SPRN_GQR6: |
492 | case SPRN_GQR7: | 496 | case SPRN_GQR7: |
493 | kvmppc_set_gpr(vcpu, rt, | 497 | *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; |
494 | to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]); | ||
495 | break; | 498 | break; |
496 | case SPRN_THRM1: | 499 | case SPRN_THRM1: |
497 | case SPRN_THRM2: | 500 | case SPRN_THRM2: |
@@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
506 | case SPRN_PMC3_GEKKO: | 509 | case SPRN_PMC3_GEKKO: |
507 | case SPRN_PMC4_GEKKO: | 510 | case SPRN_PMC4_GEKKO: |
508 | case SPRN_WPAR_GEKKO: | 511 | case SPRN_WPAR_GEKKO: |
509 | kvmppc_set_gpr(vcpu, rt, 0); | 512 | *spr_val = 0; |
510 | break; | 513 | break; |
511 | default: | 514 | default: |
512 | unprivileged: | 515 | unprivileged: |
@@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst) | |||
565 | ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) | 568 | ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) |
566 | { | 569 | { |
567 | ulong dar = 0; | 570 | ulong dar = 0; |
568 | ulong ra; | 571 | ulong ra = get_ra(inst); |
572 | ulong rb = get_rb(inst); | ||
569 | 573 | ||
570 | switch (get_op(inst)) { | 574 | switch (get_op(inst)) { |
571 | case OP_LFS: | 575 | case OP_LFS: |
572 | case OP_LFD: | 576 | case OP_LFD: |
573 | case OP_STFD: | 577 | case OP_STFD: |
574 | case OP_STFS: | 578 | case OP_STFS: |
575 | ra = get_ra(inst); | ||
576 | if (ra) | 579 | if (ra) |
577 | dar = kvmppc_get_gpr(vcpu, ra); | 580 | dar = kvmppc_get_gpr(vcpu, ra); |
578 | dar += (s32)((s16)inst); | 581 | dar += (s32)((s16)inst); |
579 | break; | 582 | break; |
580 | case 31: | 583 | case 31: |
581 | ra = get_ra(inst); | ||
582 | if (ra) | 584 | if (ra) |
583 | dar = kvmppc_get_gpr(vcpu, ra); | 585 | dar = kvmppc_get_gpr(vcpu, ra); |
584 | dar += kvmppc_get_gpr(vcpu, get_rb(inst)); | 586 | dar += kvmppc_get_gpr(vcpu, rb); |
585 | break; | 587 | break; |
586 | default: | 588 | default: |
587 | printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); | 589 | printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); |
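
Same pattern as the 44x change: rt/rs/ra/rb are decoded once with the asm/disassemble.h helpers, and register numbers (rb) are now kept clearly distinct from register values (rb_val). For reference, the decode helpers are plain fixed-position masks on the 32-bit instruction word, roughly as below (field positions per the usual D/X instruction forms; see asm/disassemble.h for the authoritative definitions):

	/* Sketch of the field extractors used above. */
	static inline unsigned int get_rt(u32 inst) { return (inst >> 21) & 0x1f; }
	static inline unsigned int get_rs(u32 inst) { return (inst >> 21) & 0x1f; }
	static inline unsigned int get_ra(u32 inst) { return (inst >> 16) & 0x1f; }
	static inline unsigned int get_rb(u32 inst) { return (inst >> 11) & 0x1f; }
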
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 108d1f580177..c6af1d623839 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -60,12 +60,20 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu); | |||
60 | 60 | ||
61 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 61 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
62 | { | 62 | { |
63 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | ||
64 | |||
63 | local_paca->kvm_hstate.kvm_vcpu = vcpu; | 65 | local_paca->kvm_hstate.kvm_vcpu = vcpu; |
64 | local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore; | 66 | local_paca->kvm_hstate.kvm_vcore = vc; |
67 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) | ||
68 | vc->stolen_tb += mftb() - vc->preempt_tb; | ||
65 | } | 69 | } |
66 | 70 | ||
67 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 71 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) |
68 | { | 72 | { |
73 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | ||
74 | |||
75 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) | ||
76 | vc->preempt_tb = mftb(); | ||
69 | } | 77 | } |
70 | 78 | ||
71 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | 79 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) |
@@ -134,6 +142,22 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) | |||
134 | vpa->yield_count = 1; | 142 | vpa->yield_count = 1; |
135 | } | 143 | } |
136 | 144 | ||
145 | /* Length for a per-processor buffer is passed in at offset 4 in the buffer */ | ||
146 | struct reg_vpa { | ||
147 | u32 dummy; | ||
148 | union { | ||
149 | u16 hword; | ||
150 | u32 word; | ||
151 | } length; | ||
152 | }; | ||
153 | |||
154 | static int vpa_is_registered(struct kvmppc_vpa *vpap) | ||
155 | { | ||
156 | if (vpap->update_pending) | ||
157 | return vpap->next_gpa != 0; | ||
158 | return vpap->pinned_addr != NULL; | ||
159 | } | ||
160 | |||
137 | static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, | 161 | static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, |
138 | unsigned long flags, | 162 | unsigned long flags, |
139 | unsigned long vcpuid, unsigned long vpa) | 163 | unsigned long vcpuid, unsigned long vpa) |
@@ -142,88 +166,182 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, | |||
142 | unsigned long len, nb; | 166 | unsigned long len, nb; |
143 | void *va; | 167 | void *va; |
144 | struct kvm_vcpu *tvcpu; | 168 | struct kvm_vcpu *tvcpu; |
145 | int err = H_PARAMETER; | 169 | int err; |
170 | int subfunc; | ||
171 | struct kvmppc_vpa *vpap; | ||
146 | 172 | ||
147 | tvcpu = kvmppc_find_vcpu(kvm, vcpuid); | 173 | tvcpu = kvmppc_find_vcpu(kvm, vcpuid); |
148 | if (!tvcpu) | 174 | if (!tvcpu) |
149 | return H_PARAMETER; | 175 | return H_PARAMETER; |
150 | 176 | ||
151 | flags >>= 63 - 18; | 177 | subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK; |
152 | flags &= 7; | 178 | if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL || |
153 | if (flags == 0 || flags == 4) | 179 | subfunc == H_VPA_REG_SLB) { |
154 | return H_PARAMETER; | 180 | /* Registering new area - address must be cache-line aligned */ |
155 | if (flags < 4) { | 181 | if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa) |
156 | if (vpa & 0x7f) | ||
157 | return H_PARAMETER; | 182 | return H_PARAMETER; |
158 | if (flags >= 2 && !tvcpu->arch.vpa) | 183 | |
159 | return H_RESOURCE; | 184 | /* convert logical addr to kernel addr and read length */ |
160 | /* registering new area; convert logical addr to real */ | ||
161 | va = kvmppc_pin_guest_page(kvm, vpa, &nb); | 185 | va = kvmppc_pin_guest_page(kvm, vpa, &nb); |
162 | if (va == NULL) | 186 | if (va == NULL) |
163 | return H_PARAMETER; | 187 | return H_PARAMETER; |
164 | if (flags <= 1) | 188 | if (subfunc == H_VPA_REG_VPA) |
165 | len = *(unsigned short *)(va + 4); | 189 | len = ((struct reg_vpa *)va)->length.hword; |
166 | else | 190 | else |
167 | len = *(unsigned int *)(va + 4); | 191 | len = ((struct reg_vpa *)va)->length.word; |
168 | if (len > nb) | 192 | kvmppc_unpin_guest_page(kvm, va); |
169 | goto out_unpin; | 193 | |
170 | switch (flags) { | 194 | /* Check length */ |
171 | case 1: /* register VPA */ | 195 | if (len > nb || len < sizeof(struct reg_vpa)) |
172 | if (len < 640) | 196 | return H_PARAMETER; |
173 | goto out_unpin; | 197 | } else { |
174 | if (tvcpu->arch.vpa) | 198 | vpa = 0; |
175 | kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa); | 199 | len = 0; |
176 | tvcpu->arch.vpa = va; | 200 | } |
177 | init_vpa(vcpu, va); | 201 | |
178 | break; | 202 | err = H_PARAMETER; |
179 | case 2: /* register DTL */ | 203 | vpap = NULL; |
180 | if (len < 48) | 204 | spin_lock(&tvcpu->arch.vpa_update_lock); |
181 | goto out_unpin; | 205 | |
182 | len -= len % 48; | 206 | switch (subfunc) { |
183 | if (tvcpu->arch.dtl) | 207 | case H_VPA_REG_VPA: /* register VPA */ |
184 | kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl); | 208 | if (len < sizeof(struct lppaca)) |
185 | tvcpu->arch.dtl = va; | ||
186 | tvcpu->arch.dtl_end = va + len; | ||
187 | break; | 209 | break; |
188 | case 3: /* register SLB shadow buffer */ | 210 | vpap = &tvcpu->arch.vpa; |
189 | if (len < 16) | 211 | err = 0; |
190 | goto out_unpin; | 212 | break; |
191 | if (tvcpu->arch.slb_shadow) | 213 | |
192 | kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow); | 214 | case H_VPA_REG_DTL: /* register DTL */ |
193 | tvcpu->arch.slb_shadow = va; | 215 | if (len < sizeof(struct dtl_entry)) |
194 | break; | 216 | break; |
195 | } | 217 | len -= len % sizeof(struct dtl_entry); |
196 | } else { | 218 | |
197 | switch (flags) { | 219 | /* Check that they have previously registered a VPA */ |
198 | case 5: /* unregister VPA */ | 220 | err = H_RESOURCE; |
199 | if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl) | 221 | if (!vpa_is_registered(&tvcpu->arch.vpa)) |
200 | return H_RESOURCE; | ||
201 | if (!tvcpu->arch.vpa) | ||
202 | break; | ||
203 | kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa); | ||
204 | tvcpu->arch.vpa = NULL; | ||
205 | break; | 222 | break; |
206 | case 6: /* unregister DTL */ | 223 | |
207 | if (!tvcpu->arch.dtl) | 224 | vpap = &tvcpu->arch.dtl; |
208 | break; | 225 | err = 0; |
209 | kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl); | 226 | break; |
210 | tvcpu->arch.dtl = NULL; | 227 | |
228 | case H_VPA_REG_SLB: /* register SLB shadow buffer */ | ||
229 | /* Check that they have previously registered a VPA */ | ||
230 | err = H_RESOURCE; | ||
231 | if (!vpa_is_registered(&tvcpu->arch.vpa)) | ||
211 | break; | 232 | break; |
212 | case 7: /* unregister SLB shadow buffer */ | 233 | |
213 | if (!tvcpu->arch.slb_shadow) | 234 | vpap = &tvcpu->arch.slb_shadow; |
214 | break; | 235 | err = 0; |
215 | kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow); | 236 | break; |
216 | tvcpu->arch.slb_shadow = NULL; | 237 | |
238 | case H_VPA_DEREG_VPA: /* deregister VPA */ | ||
239 | /* Check they don't still have a DTL or SLB buf registered */ | ||
240 | err = H_RESOURCE; | ||
241 | if (vpa_is_registered(&tvcpu->arch.dtl) || | ||
242 | vpa_is_registered(&tvcpu->arch.slb_shadow)) | ||
217 | break; | 243 | break; |
218 | } | 244 | |
245 | vpap = &tvcpu->arch.vpa; | ||
246 | err = 0; | ||
247 | break; | ||
248 | |||
249 | case H_VPA_DEREG_DTL: /* deregister DTL */ | ||
250 | vpap = &tvcpu->arch.dtl; | ||
251 | err = 0; | ||
252 | break; | ||
253 | |||
254 | case H_VPA_DEREG_SLB: /* deregister SLB shadow buffer */ | ||
255 | vpap = &tvcpu->arch.slb_shadow; | ||
256 | err = 0; | ||
257 | break; | ||
258 | } | ||
259 | |||
260 | if (vpap) { | ||
261 | vpap->next_gpa = vpa; | ||
262 | vpap->len = len; | ||
263 | vpap->update_pending = 1; | ||
219 | } | 264 | } |
220 | return H_SUCCESS; | ||
221 | 265 | ||
222 | out_unpin: | 266 | spin_unlock(&tvcpu->arch.vpa_update_lock); |
223 | kvmppc_unpin_guest_page(kvm, va); | 267 | |
224 | return err; | 268 | return err; |
225 | } | 269 | } |
226 | 270 | ||
271 | static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap) | ||
272 | { | ||
273 | void *va; | ||
274 | unsigned long nb; | ||
275 | |||
276 | vpap->update_pending = 0; | ||
277 | va = NULL; | ||
278 | if (vpap->next_gpa) { | ||
279 | va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb); | ||
280 | if (nb < vpap->len) { | ||
281 | /* | ||
282 | * If it's now too short, it must be that userspace | ||
283 | * has changed the mappings underlying guest memory, | ||
284 | * so unregister the region. | ||
285 | */ | ||
286 | kvmppc_unpin_guest_page(kvm, va); | ||
287 | va = NULL; | ||
288 | } | ||
289 | } | ||
290 | if (vpap->pinned_addr) | ||
291 | kvmppc_unpin_guest_page(kvm, vpap->pinned_addr); | ||
292 | vpap->pinned_addr = va; | ||
293 | if (va) | ||
294 | vpap->pinned_end = va + vpap->len; | ||
295 | } | ||
296 | |||
297 | static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) | ||
298 | { | ||
299 | struct kvm *kvm = vcpu->kvm; | ||
300 | |||
301 | spin_lock(&vcpu->arch.vpa_update_lock); | ||
302 | if (vcpu->arch.vpa.update_pending) { | ||
303 | kvmppc_update_vpa(kvm, &vcpu->arch.vpa); | ||
304 | init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); | ||
305 | } | ||
306 | if (vcpu->arch.dtl.update_pending) { | ||
307 | kvmppc_update_vpa(kvm, &vcpu->arch.dtl); | ||
308 | vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; | ||
309 | vcpu->arch.dtl_index = 0; | ||
310 | } | ||
311 | if (vcpu->arch.slb_shadow.update_pending) | ||
312 | kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow); | ||
313 | spin_unlock(&vcpu->arch.vpa_update_lock); | ||
314 | } | ||
315 | |||
316 | static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, | ||
317 | struct kvmppc_vcore *vc) | ||
318 | { | ||
319 | struct dtl_entry *dt; | ||
320 | struct lppaca *vpa; | ||
321 | unsigned long old_stolen; | ||
322 | |||
323 | dt = vcpu->arch.dtl_ptr; | ||
324 | vpa = vcpu->arch.vpa.pinned_addr; | ||
325 | old_stolen = vcpu->arch.stolen_logged; | ||
326 | vcpu->arch.stolen_logged = vc->stolen_tb; | ||
327 | if (!dt || !vpa) | ||
328 | return; | ||
329 | memset(dt, 0, sizeof(struct dtl_entry)); | ||
330 | dt->dispatch_reason = 7; | ||
331 | dt->processor_id = vc->pcpu + vcpu->arch.ptid; | ||
332 | dt->timebase = mftb(); | ||
333 | dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen; | ||
334 | dt->srr0 = kvmppc_get_pc(vcpu); | ||
335 | dt->srr1 = vcpu->arch.shregs.msr; | ||
336 | ++dt; | ||
337 | if (dt == vcpu->arch.dtl.pinned_end) | ||
338 | dt = vcpu->arch.dtl.pinned_addr; | ||
339 | vcpu->arch.dtl_ptr = dt; | ||
340 | /* order writing *dt vs. writing vpa->dtl_idx */ | ||
341 | smp_wmb(); | ||
342 | vpa->dtl_idx = ++vcpu->arch.dtl_index; | ||
343 | } | ||
344 | |||
227 | int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | 345 | int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) |
228 | { | 346 | { |
229 | unsigned long req = kvmppc_get_gpr(vcpu, 3); | 347 | unsigned long req = kvmppc_get_gpr(vcpu, 3); |
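
do_h_register_vpa() no longer decodes the flags by hand (the old `flags >>= 63 - 18; flags &= 7;`) and no longer pins the buffer immediately: it just records next_gpa/len and sets update_pending under vpa_update_lock, and kvmppc_update_vpas() performs the actual pin/unpin the next time the vcore runs. The H_VPA_* sub-function constants referenced here are presumably added to asm/hvcall.h; their values can be inferred from the old open-coded switch (cases 1-3 register, 5-7 deregister), roughly:

	/* Sketch of the H_REGISTER_VPA sub-function field, values inferred from
	 * the previous open-coded decoding; see asm/hvcall.h for the real ones. */
	#define H_VPA_FUNC_SHIFT	(63 - 18)	/* same shift as the old code */
	#define H_VPA_FUNC_MASK		7UL
	#define H_VPA_REG_VPA		1UL	/* register VPA */
	#define H_VPA_REG_DTL		2UL	/* register DTL buffer */
	#define H_VPA_REG_SLB		3UL	/* register SLB shadow buffer */
	#define H_VPA_DEREG_VPA		5UL	/* deregister VPA */
	#define H_VPA_DEREG_DTL		6UL	/* deregister DTL buffer */
	#define H_VPA_DEREG_SLB		7UL	/* deregister SLB shadow buffer */
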
@@ -468,6 +586,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
468 | /* default to host PVR, since we can't spoof it */ | 586 | /* default to host PVR, since we can't spoof it */ |
469 | vcpu->arch.pvr = mfspr(SPRN_PVR); | 587 | vcpu->arch.pvr = mfspr(SPRN_PVR); |
470 | kvmppc_set_pvr(vcpu, vcpu->arch.pvr); | 588 | kvmppc_set_pvr(vcpu, vcpu->arch.pvr); |
589 | spin_lock_init(&vcpu->arch.vpa_update_lock); | ||
471 | 590 | ||
472 | kvmppc_mmu_book3s_hv_init(vcpu); | 591 | kvmppc_mmu_book3s_hv_init(vcpu); |
473 | 592 | ||
@@ -486,6 +605,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
486 | INIT_LIST_HEAD(&vcore->runnable_threads); | 605 | INIT_LIST_HEAD(&vcore->runnable_threads); |
487 | spin_lock_init(&vcore->lock); | 606 | spin_lock_init(&vcore->lock); |
488 | init_waitqueue_head(&vcore->wq); | 607 | init_waitqueue_head(&vcore->wq); |
608 | vcore->preempt_tb = mftb(); | ||
489 | } | 609 | } |
490 | kvm->arch.vcores[core] = vcore; | 610 | kvm->arch.vcores[core] = vcore; |
491 | } | 611 | } |
@@ -498,6 +618,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
498 | ++vcore->num_threads; | 618 | ++vcore->num_threads; |
499 | spin_unlock(&vcore->lock); | 619 | spin_unlock(&vcore->lock); |
500 | vcpu->arch.vcore = vcore; | 620 | vcpu->arch.vcore = vcore; |
621 | vcpu->arch.stolen_logged = vcore->stolen_tb; | ||
501 | 622 | ||
502 | vcpu->arch.cpu_type = KVM_CPU_3S_64; | 623 | vcpu->arch.cpu_type = KVM_CPU_3S_64; |
503 | kvmppc_sanity_check(vcpu); | 624 | kvmppc_sanity_check(vcpu); |
@@ -512,12 +633,14 @@ out: | |||
512 | 633 | ||
513 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | 634 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) |
514 | { | 635 | { |
515 | if (vcpu->arch.dtl) | 636 | spin_lock(&vcpu->arch.vpa_update_lock); |
516 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl); | 637 | if (vcpu->arch.dtl.pinned_addr) |
517 | if (vcpu->arch.slb_shadow) | 638 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr); |
518 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow); | 639 | if (vcpu->arch.slb_shadow.pinned_addr) |
519 | if (vcpu->arch.vpa) | 640 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr); |
520 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa); | 641 | if (vcpu->arch.vpa.pinned_addr) |
642 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr); | ||
643 | spin_unlock(&vcpu->arch.vpa_update_lock); | ||
521 | kvm_vcpu_uninit(vcpu); | 644 | kvm_vcpu_uninit(vcpu); |
522 | kmem_cache_free(kvm_vcpu_cache, vcpu); | 645 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
523 | } | 646 | } |
@@ -569,6 +692,45 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, | |||
569 | list_del(&vcpu->arch.run_list); | 692 | list_del(&vcpu->arch.run_list); |
570 | } | 693 | } |
571 | 694 | ||
695 | static int kvmppc_grab_hwthread(int cpu) | ||
696 | { | ||
697 | struct paca_struct *tpaca; | ||
698 | long timeout = 1000; | ||
699 | |||
700 | tpaca = &paca[cpu]; | ||
701 | |||
702 | /* Ensure the thread won't go into the kernel if it wakes */ | ||
703 | tpaca->kvm_hstate.hwthread_req = 1; | ||
704 | |||
705 | /* | ||
706 | * If the thread is already executing in the kernel (e.g. handling | ||
707 | * a stray interrupt), wait for it to get back to nap mode. | ||
708 | * The smp_mb() is to ensure that our setting of hwthread_req | ||
709 | * is visible before we look at hwthread_state, so if this | ||
710 | * races with the code at system_reset_pSeries and the thread | ||
711 | * misses our setting of hwthread_req, we are sure to see its | ||
712 | * setting of hwthread_state, and vice versa. | ||
713 | */ | ||
714 | smp_mb(); | ||
715 | while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { | ||
716 | if (--timeout <= 0) { | ||
717 | pr_err("KVM: couldn't grab cpu %d\n", cpu); | ||
718 | return -EBUSY; | ||
719 | } | ||
720 | udelay(1); | ||
721 | } | ||
722 | return 0; | ||
723 | } | ||
724 | |||
725 | static void kvmppc_release_hwthread(int cpu) | ||
726 | { | ||
727 | struct paca_struct *tpaca; | ||
728 | |||
729 | tpaca = &paca[cpu]; | ||
730 | tpaca->kvm_hstate.hwthread_req = 0; | ||
731 | tpaca->kvm_hstate.kvm_vcpu = NULL; | ||
732 | } | ||
733 | |||
572 | static void kvmppc_start_thread(struct kvm_vcpu *vcpu) | 734 | static void kvmppc_start_thread(struct kvm_vcpu *vcpu) |
573 | { | 735 | { |
574 | int cpu; | 736 | int cpu; |
@@ -588,8 +750,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) | |||
588 | smp_wmb(); | 750 | smp_wmb(); |
589 | #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) | 751 | #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) |
590 | if (vcpu->arch.ptid) { | 752 | if (vcpu->arch.ptid) { |
591 | tpaca->cpu_start = 0x80; | 753 | kvmppc_grab_hwthread(cpu); |
592 | wmb(); | ||
593 | xics_wake_cpu(cpu); | 754 | xics_wake_cpu(cpu); |
594 | ++vc->n_woken; | 755 | ++vc->n_woken; |
595 | } | 756 | } |
@@ -639,7 +800,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) | |||
639 | struct kvm_vcpu *vcpu, *vcpu0, *vnext; | 800 | struct kvm_vcpu *vcpu, *vcpu0, *vnext; |
640 | long ret; | 801 | long ret; |
641 | u64 now; | 802 | u64 now; |
642 | int ptid; | 803 | int ptid, i; |
643 | 804 | ||
644 | /* don't start if any threads have a signal pending */ | 805 | /* don't start if any threads have a signal pending */ |
645 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) | 806 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) |
@@ -681,17 +842,29 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) | |||
681 | vc->nap_count = 0; | 842 | vc->nap_count = 0; |
682 | vc->entry_exit_count = 0; | 843 | vc->entry_exit_count = 0; |
683 | vc->vcore_state = VCORE_RUNNING; | 844 | vc->vcore_state = VCORE_RUNNING; |
845 | vc->stolen_tb += mftb() - vc->preempt_tb; | ||
684 | vc->in_guest = 0; | 846 | vc->in_guest = 0; |
685 | vc->pcpu = smp_processor_id(); | 847 | vc->pcpu = smp_processor_id(); |
686 | vc->napping_threads = 0; | 848 | vc->napping_threads = 0; |
687 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) | 849 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { |
688 | kvmppc_start_thread(vcpu); | 850 | kvmppc_start_thread(vcpu); |
851 | if (vcpu->arch.vpa.update_pending || | ||
852 | vcpu->arch.slb_shadow.update_pending || | ||
853 | vcpu->arch.dtl.update_pending) | ||
854 | kvmppc_update_vpas(vcpu); | ||
855 | kvmppc_create_dtl_entry(vcpu, vc); | ||
856 | } | ||
857 | /* Grab any remaining hw threads so they can't go into the kernel */ | ||
858 | for (i = ptid; i < threads_per_core; ++i) | ||
859 | kvmppc_grab_hwthread(vc->pcpu + i); | ||
689 | 860 | ||
690 | preempt_disable(); | 861 | preempt_disable(); |
691 | spin_unlock(&vc->lock); | 862 | spin_unlock(&vc->lock); |
692 | 863 | ||
693 | kvm_guest_enter(); | 864 | kvm_guest_enter(); |
694 | __kvmppc_vcore_entry(NULL, vcpu0); | 865 | __kvmppc_vcore_entry(NULL, vcpu0); |
866 | for (i = 0; i < threads_per_core; ++i) | ||
867 | kvmppc_release_hwthread(vc->pcpu + i); | ||
695 | 868 | ||
696 | spin_lock(&vc->lock); | 869 | spin_lock(&vc->lock); |
697 | /* disable sending of IPIs on virtual external irqs */ | 870 | /* disable sending of IPIs on virtual external irqs */ |
@@ -737,6 +910,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) | |||
737 | spin_lock(&vc->lock); | 910 | spin_lock(&vc->lock); |
738 | out: | 911 | out: |
739 | vc->vcore_state = VCORE_INACTIVE; | 912 | vc->vcore_state = VCORE_INACTIVE; |
913 | vc->preempt_tb = mftb(); | ||
740 | list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, | 914 | list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, |
741 | arch.run_list) { | 915 | arch.run_list) { |
742 | if (vcpu->arch.ret != RESUME_GUEST) { | 916 | if (vcpu->arch.ret != RESUME_GUEST) { |
@@ -835,6 +1009,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
835 | spin_lock(&vc->lock); | 1009 | spin_lock(&vc->lock); |
836 | continue; | 1010 | continue; |
837 | } | 1011 | } |
1012 | vc->runner = vcpu; | ||
838 | n_ceded = 0; | 1013 | n_ceded = 0; |
839 | list_for_each_entry(v, &vc->runnable_threads, arch.run_list) | 1014 | list_for_each_entry(v, &vc->runnable_threads, arch.run_list) |
840 | n_ceded += v->arch.ceded; | 1015 | n_ceded += v->arch.ceded; |
@@ -854,6 +1029,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
854 | wake_up(&v->arch.cpu_run); | 1029 | wake_up(&v->arch.cpu_run); |
855 | } | 1030 | } |
856 | } | 1031 | } |
1032 | vc->runner = NULL; | ||
857 | } | 1033 | } |
858 | 1034 | ||
859 | if (signal_pending(current)) { | 1035 | if (signal_pending(current)) { |
@@ -917,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
917 | return r; | 1093 | return r; |
918 | } | 1094 | } |
919 | 1095 | ||
920 | static long kvmppc_stt_npages(unsigned long window_size) | ||
921 | { | ||
922 | return ALIGN((window_size >> SPAPR_TCE_SHIFT) | ||
923 | * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; | ||
924 | } | ||
925 | |||
926 | static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt) | ||
927 | { | ||
928 | struct kvm *kvm = stt->kvm; | ||
929 | int i; | ||
930 | |||
931 | mutex_lock(&kvm->lock); | ||
932 | list_del(&stt->list); | ||
933 | for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++) | ||
934 | __free_page(stt->pages[i]); | ||
935 | kfree(stt); | ||
936 | mutex_unlock(&kvm->lock); | ||
937 | |||
938 | kvm_put_kvm(kvm); | ||
939 | } | ||
940 | |||
941 | static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
942 | { | ||
943 | struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data; | ||
944 | struct page *page; | ||
945 | |||
946 | if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size)) | ||
947 | return VM_FAULT_SIGBUS; | ||
948 | |||
949 | page = stt->pages[vmf->pgoff]; | ||
950 | get_page(page); | ||
951 | vmf->page = page; | ||
952 | return 0; | ||
953 | } | ||
954 | |||
955 | static const struct vm_operations_struct kvm_spapr_tce_vm_ops = { | ||
956 | .fault = kvm_spapr_tce_fault, | ||
957 | }; | ||
958 | |||
959 | static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma) | ||
960 | { | ||
961 | vma->vm_ops = &kvm_spapr_tce_vm_ops; | ||
962 | return 0; | ||
963 | } | ||
964 | |||
965 | static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) | ||
966 | { | ||
967 | struct kvmppc_spapr_tce_table *stt = filp->private_data; | ||
968 | |||
969 | release_spapr_tce_table(stt); | ||
970 | return 0; | ||
971 | } | ||
972 | |||
973 | static struct file_operations kvm_spapr_tce_fops = { | ||
974 | .mmap = kvm_spapr_tce_mmap, | ||
975 | .release = kvm_spapr_tce_release, | ||
976 | }; | ||
977 | |||
978 | long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | ||
979 | struct kvm_create_spapr_tce *args) | ||
980 | { | ||
981 | struct kvmppc_spapr_tce_table *stt = NULL; | ||
982 | long npages; | ||
983 | int ret = -ENOMEM; | ||
984 | int i; | ||
985 | |||
986 | /* Check this LIOBN hasn't been previously allocated */ | ||
987 | list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { | ||
988 | if (stt->liobn == args->liobn) | ||
989 | return -EBUSY; | ||
990 | } | ||
991 | |||
992 | npages = kvmppc_stt_npages(args->window_size); | ||
993 | |||
994 | stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), | ||
995 | GFP_KERNEL); | ||
996 | if (!stt) | ||
997 | goto fail; | ||
998 | |||
999 | stt->liobn = args->liobn; | ||
1000 | stt->window_size = args->window_size; | ||
1001 | stt->kvm = kvm; | ||
1002 | |||
1003 | for (i = 0; i < npages; i++) { | ||
1004 | stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
1005 | if (!stt->pages[i]) | ||
1006 | goto fail; | ||
1007 | } | ||
1008 | |||
1009 | kvm_get_kvm(kvm); | ||
1010 | |||
1011 | mutex_lock(&kvm->lock); | ||
1012 | list_add(&stt->list, &kvm->arch.spapr_tce_tables); | ||
1013 | |||
1014 | mutex_unlock(&kvm->lock); | ||
1015 | |||
1016 | return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, | ||
1017 | stt, O_RDWR); | ||
1018 | |||
1019 | fail: | ||
1020 | if (stt) { | ||
1021 | for (i = 0; i < npages; i++) | ||
1022 | if (stt->pages[i]) | ||
1023 | __free_page(stt->pages[i]); | ||
1024 | |||
1025 | kfree(stt); | ||
1026 | } | ||
1027 | return ret; | ||
1028 | } | ||
1029 | 1096 | ||
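For reference, the kvmppc_stt_npages() helper removed from this file above sizes the TCE table backing store as one 64-bit TCE per SPAPR_TCE_SHIFT-sized chunk of the DMA window, rounded up to whole pages. A standalone sketch of that arithmetic, assuming a hypothetical 256 MB window, SPAPR_TCE_SHIFT = 12 and 4 KB pages:

#include <stdio.h>
#include <stdint.h>

#define SPAPR_TCE_SHIFT 12
#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long window_size = 256UL << 20;  /* hypothetical 256 MB DMA window */
	unsigned long bytes = (window_size >> SPAPR_TCE_SHIFT) * sizeof(uint64_t);
	unsigned long npages = ALIGN(bytes, PAGE_SIZE) / PAGE_SIZE;

	/* 65536 TCE entries -> 524288 bytes -> 128 backing pages */
	printf("%lu entries, %lu bytes, %lu pages\n",
	       window_size >> SPAPR_TCE_SHIFT, bytes, npages);
	return 0;
}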
1030 | /* Work out RMLS (real mode limit selector) field value for a given RMA size. | 1097 | /* Work out RMLS (real mode limit selector) field value for a given RMA size. |
1031 | Assumes POWER7 or PPC970. */ | 1098 | Assumes POWER7 or PPC970. */ |
@@ -1108,6 +1175,38 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) | |||
1108 | return fd; | 1175 | return fd; |
1109 | } | 1176 | } |
1110 | 1177 | ||
1178 | static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, | ||
1179 | int linux_psize) | ||
1180 | { | ||
1181 | struct mmu_psize_def *def = &mmu_psize_defs[linux_psize]; | ||
1182 | |||
1183 | if (!def->shift) | ||
1184 | return; | ||
1185 | (*sps)->page_shift = def->shift; | ||
1186 | (*sps)->slb_enc = def->sllp; | ||
1187 | (*sps)->enc[0].page_shift = def->shift; | ||
1188 | (*sps)->enc[0].pte_enc = def->penc; | ||
1189 | (*sps)++; | ||
1190 | } | ||
1191 | |||
1192 | int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) | ||
1193 | { | ||
1194 | struct kvm_ppc_one_seg_page_size *sps; | ||
1195 | |||
1196 | info->flags = KVM_PPC_PAGE_SIZES_REAL; | ||
1197 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | ||
1198 | info->flags |= KVM_PPC_1T_SEGMENTS; | ||
1199 | info->slb_size = mmu_slb_size; | ||
1200 | |||
1201 | /* We only support these sizes for now, and no multi-size segments */ | ||
1202 | sps = &info->sps[0]; | ||
1203 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K); | ||
1204 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K); | ||
1205 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M); | ||
1206 | |||
1207 | return 0; | ||
1208 | } | ||
1209 | |||
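kvm_vm_ioctl_get_smmu_info() above reports the supported segment page sizes by filling consecutive entries through a moving cursor, which is why kvmppc_add_seg_page_size() takes a pointer-to-pointer. A simplified userspace sketch of the same filling pattern; the struct layout and the encoding values are illustrative, only the field names come from the hunk above:

#include <stdio.h>

struct one_seg_page_size {              /* simplified stand-in for the uapi struct */
	unsigned int page_shift;
	unsigned int slb_enc;
	struct { unsigned int page_shift, pte_enc; } enc[1];
};

/* mirror kvmppc_add_seg_page_size(): fill one slot, then advance the cursor */
static void add_seg_page_size(struct one_seg_page_size **sps, unsigned int shift,
			      unsigned int slb_enc, unsigned int pte_enc)
{
	(*sps)->page_shift = shift;
	(*sps)->slb_enc = slb_enc;
	(*sps)->enc[0].page_shift = shift;
	(*sps)->enc[0].pte_enc = pte_enc;
	(*sps)++;
}

int main(void)
{
	struct one_seg_page_size table[3], *sps = table;

	add_seg_page_size(&sps, 12, 0x000, 0);  /* 4K  - encodings illustrative */
	add_seg_page_size(&sps, 16, 0x110, 1);  /* 64K - encodings illustrative */
	add_seg_page_size(&sps, 24, 0x100, 0);  /* 16M - encodings illustrative */

	for (struct one_seg_page_size *p = table; p < sps; p++)
		printf("base page shift %u, slb_enc 0x%x\n", p->page_shift, p->slb_enc);
	return 0;
}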
1111 | /* | 1210 | /* |
1112 | * Get (and clear) the dirty memory log for a memory slot. | 1211 | * Get (and clear) the dirty memory log for a memory slot. |
1113 | */ | 1212 | */ |
@@ -1404,12 +1503,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
1404 | return EMULATE_FAIL; | 1503 | return EMULATE_FAIL; |
1405 | } | 1504 | } |
1406 | 1505 | ||
1407 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 1506 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
1408 | { | 1507 | { |
1409 | return EMULATE_FAIL; | 1508 | return EMULATE_FAIL; |
1410 | } | 1509 | } |
1411 | 1510 | ||
1412 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 1511 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
1413 | { | 1512 | { |
1414 | return EMULATE_FAIL; | 1513 | return EMULATE_FAIL; |
1415 | } | 1514 | } |
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index d3fb4df02c41..84035a528c80 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S | |||
@@ -68,19 +68,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
68 | rotldi r10,r10,16 | 68 | rotldi r10,r10,16 |
69 | mtmsrd r10,1 | 69 | mtmsrd r10,1 |
70 | 70 | ||
71 | /* Save host PMU registers and load guest PMU registers */ | 71 | /* Save host PMU registers */ |
72 | /* R4 is live here (vcpu pointer) but not r3 or r5 */ | 72 | /* R4 is live here (vcpu pointer) but not r3 or r5 */ |
73 | li r3, 1 | 73 | li r3, 1 |
74 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | 74 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ |
75 | mfspr r7, SPRN_MMCR0 /* save MMCR0 */ | 75 | mfspr r7, SPRN_MMCR0 /* save MMCR0 */ |
76 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ | 76 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ |
77 | mfspr r6, SPRN_MMCRA | ||
78 | BEGIN_FTR_SECTION | ||
79 | /* On P7, clear MMCRA in order to disable SDAR updates */ | ||
80 | li r5, 0 | ||
81 | mtspr SPRN_MMCRA, r5 | ||
82 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||
77 | isync | 83 | isync |
78 | ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ | 84 | ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ |
79 | lbz r5, LPPACA_PMCINUSE(r3) | 85 | lbz r5, LPPACA_PMCINUSE(r3) |
80 | cmpwi r5, 0 | 86 | cmpwi r5, 0 |
81 | beq 31f /* skip if not */ | 87 | beq 31f /* skip if not */ |
82 | mfspr r5, SPRN_MMCR1 | 88 | mfspr r5, SPRN_MMCR1 |
83 | mfspr r6, SPRN_MMCRA | ||
84 | std r7, HSTATE_MMCR(r13) | 89 | std r7, HSTATE_MMCR(r13) |
85 | std r5, HSTATE_MMCR + 8(r13) | 90 | std r5, HSTATE_MMCR + 8(r13) |
86 | std r6, HSTATE_MMCR + 16(r13) | 91 | std r6, HSTATE_MMCR + 16(r13) |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index b70bf22a3ff3..a84aafce2a12 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/hvcall.h> | 26 | #include <asm/hvcall.h> |
27 | #include <asm/asm-offsets.h> | 27 | #include <asm/asm-offsets.h> |
28 | #include <asm/exception-64s.h> | 28 | #include <asm/exception-64s.h> |
29 | #include <asm/kvm_book3s_asm.h> | ||
29 | 30 | ||
30 | /***************************************************************************** | 31 | /***************************************************************************** |
31 | * * | 32 | * * |
@@ -82,6 +83,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline) | |||
82 | 83 | ||
83 | #define XICS_XIRR 4 | 84 | #define XICS_XIRR 4 |
84 | #define XICS_QIRR 0xc | 85 | #define XICS_QIRR 0xc |
86 | #define XICS_IPI 2 /* interrupt source # for IPIs */ | ||
85 | 87 | ||
86 | /* | 88 | /* |
87 | * We come in here when wakened from nap mode on a secondary hw thread. | 89 | * We come in here when wakened from nap mode on a secondary hw thread. |
@@ -94,26 +96,54 @@ kvm_start_guest: | |||
94 | subi r1,r1,STACK_FRAME_OVERHEAD | 96 | subi r1,r1,STACK_FRAME_OVERHEAD |
95 | ld r2,PACATOC(r13) | 97 | ld r2,PACATOC(r13) |
96 | 98 | ||
97 | /* were we napping due to cede? */ | 99 | li r0,KVM_HWTHREAD_IN_KVM |
98 | lbz r0,HSTATE_NAPPING(r13) | 100 | stb r0,HSTATE_HWTHREAD_STATE(r13) |
99 | cmpwi r0,0 | ||
100 | bne kvm_end_cede | ||
101 | 101 | ||
102 | /* get vcpu pointer */ | 102 | /* NV GPR values from power7_idle() will no longer be valid */ |
103 | ld r4, HSTATE_KVM_VCPU(r13) | 103 | li r0,1 |
104 | stb r0,PACA_NAPSTATELOST(r13) | ||
104 | 105 | ||
105 | /* We got here with an IPI; clear it */ | 106 | /* get vcpu pointer, NULL if we have no vcpu to run */ |
106 | ld r5, HSTATE_XICS_PHYS(r13) | 107 | ld r4,HSTATE_KVM_VCPU(r13) |
107 | li r0, 0xff | 108 | cmpdi cr1,r4,0 |
108 | li r6, XICS_QIRR | 109 | |
109 | li r7, XICS_XIRR | 110 | /* Check the wake reason in SRR1 to see why we got here */ |
110 | lwzcix r8, r5, r7 /* ack the interrupt */ | 111 | mfspr r3,SPRN_SRR1 |
112 | rlwinm r3,r3,44-31,0x7 /* extract wake reason field */ | ||
113 | cmpwi r3,4 /* was it an external interrupt? */ | ||
114 | bne 27f | ||
115 | |||
116 | /* | ||
117 | * External interrupt - for now assume it is an IPI, since we | ||
118 | * should never get any other interrupts sent to offline threads. | ||
119 | * Only do this for secondary threads. | ||
120 | */ | ||
121 | beq cr1,25f | ||
122 | lwz r3,VCPU_PTID(r4) | ||
123 | cmpwi r3,0 | ||
124 | beq 27f | ||
125 | 25: ld r5,HSTATE_XICS_PHYS(r13) | ||
126 | li r0,0xff | ||
127 | li r6,XICS_QIRR | ||
128 | li r7,XICS_XIRR | ||
129 | lwzcix r8,r5,r7 /* get and ack the interrupt */ | ||
111 | sync | 130 | sync |
112 | stbcix r0, r5, r6 /* clear it */ | 131 | clrldi. r9,r8,40 /* get interrupt source ID. */ |
113 | stwcix r8, r5, r7 /* EOI it */ | 132 | beq 27f /* none there? */ |
133 | cmpwi r9,XICS_IPI | ||
134 | bne 26f | ||
135 | stbcix r0,r5,r6 /* clear IPI */ | ||
136 | 26: stwcix r8,r5,r7 /* EOI the interrupt */ | ||
114 | 137 | ||
115 | /* NV GPR values from power7_idle() will no longer be valid */ | 138 | 27: /* XXX should handle hypervisor maintenance interrupts etc. here */ |
116 | stb r0, PACA_NAPSTATELOST(r13) | 139 | |
140 | /* if we have no vcpu to run, go back to sleep */ | ||
141 | beq cr1,kvm_no_guest | ||
142 | |||
143 | /* were we napping due to cede? */ | ||
144 | lbz r0,HSTATE_NAPPING(r13) | ||
145 | cmpwi r0,0 | ||
146 | bne kvm_end_cede | ||
117 | 147 | ||
118 | .global kvmppc_hv_entry | 148 | .global kvmppc_hv_entry |
119 | kvmppc_hv_entry: | 149 | kvmppc_hv_entry: |
@@ -129,24 +159,15 @@ kvmppc_hv_entry: | |||
129 | mflr r0 | 159 | mflr r0 |
130 | std r0, HSTATE_VMHANDLER(r13) | 160 | std r0, HSTATE_VMHANDLER(r13) |
131 | 161 | ||
132 | ld r14, VCPU_GPR(r14)(r4) | 162 | /* Set partition DABR */ |
133 | ld r15, VCPU_GPR(r15)(r4) | 163 | /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ |
134 | ld r16, VCPU_GPR(r16)(r4) | 164 | li r5,3 |
135 | ld r17, VCPU_GPR(r17)(r4) | 165 | ld r6,VCPU_DABR(r4) |
136 | ld r18, VCPU_GPR(r18)(r4) | 166 | mtspr SPRN_DABRX,r5 |
137 | ld r19, VCPU_GPR(r19)(r4) | 167 | mtspr SPRN_DABR,r6 |
138 | ld r20, VCPU_GPR(r20)(r4) | 168 | BEGIN_FTR_SECTION |
139 | ld r21, VCPU_GPR(r21)(r4) | 169 | isync |
140 | ld r22, VCPU_GPR(r22)(r4) | 170 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
141 | ld r23, VCPU_GPR(r23)(r4) | ||
142 | ld r24, VCPU_GPR(r24)(r4) | ||
143 | ld r25, VCPU_GPR(r25)(r4) | ||
144 | ld r26, VCPU_GPR(r26)(r4) | ||
145 | ld r27, VCPU_GPR(r27)(r4) | ||
146 | ld r28, VCPU_GPR(r28)(r4) | ||
147 | ld r29, VCPU_GPR(r29)(r4) | ||
148 | ld r30, VCPU_GPR(r30)(r4) | ||
149 | ld r31, VCPU_GPR(r31)(r4) | ||
150 | 171 | ||
151 | /* Load guest PMU registers */ | 172 | /* Load guest PMU registers */ |
152 | /* R4 is live here (vcpu pointer) */ | 173 | /* R4 is live here (vcpu pointer) */ |
@@ -185,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
185 | /* Load up FP, VMX and VSX registers */ | 206 | /* Load up FP, VMX and VSX registers */ |
186 | bl kvmppc_load_fp | 207 | bl kvmppc_load_fp |
187 | 208 | ||
209 | ld r14, VCPU_GPR(r14)(r4) | ||
210 | ld r15, VCPU_GPR(r15)(r4) | ||
211 | ld r16, VCPU_GPR(r16)(r4) | ||
212 | ld r17, VCPU_GPR(r17)(r4) | ||
213 | ld r18, VCPU_GPR(r18)(r4) | ||
214 | ld r19, VCPU_GPR(r19)(r4) | ||
215 | ld r20, VCPU_GPR(r20)(r4) | ||
216 | ld r21, VCPU_GPR(r21)(r4) | ||
217 | ld r22, VCPU_GPR(r22)(r4) | ||
218 | ld r23, VCPU_GPR(r23)(r4) | ||
219 | ld r24, VCPU_GPR(r24)(r4) | ||
220 | ld r25, VCPU_GPR(r25)(r4) | ||
221 | ld r26, VCPU_GPR(r26)(r4) | ||
222 | ld r27, VCPU_GPR(r27)(r4) | ||
223 | ld r28, VCPU_GPR(r28)(r4) | ||
224 | ld r29, VCPU_GPR(r29)(r4) | ||
225 | ld r30, VCPU_GPR(r30)(r4) | ||
226 | ld r31, VCPU_GPR(r31)(r4) | ||
227 | |||
188 | BEGIN_FTR_SECTION | 228 | BEGIN_FTR_SECTION |
189 | /* Switch DSCR to guest value */ | 229 | /* Switch DSCR to guest value */ |
190 | ld r5, VCPU_DSCR(r4) | 230 | ld r5, VCPU_DSCR(r4) |
@@ -226,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
226 | mtspr SPRN_DAR, r5 | 266 | mtspr SPRN_DAR, r5 |
227 | mtspr SPRN_DSISR, r6 | 267 | mtspr SPRN_DSISR, r6 |
228 | 268 | ||
229 | /* Set partition DABR */ | ||
230 | li r5,3 | ||
231 | ld r6,VCPU_DABR(r4) | ||
232 | mtspr SPRN_DABRX,r5 | ||
233 | mtspr SPRN_DABR,r6 | ||
234 | |||
235 | BEGIN_FTR_SECTION | 269 | BEGIN_FTR_SECTION |
236 | /* Restore AMR and UAMOR, set AMOR to all 1s */ | 270 | /* Restore AMR and UAMOR, set AMOR to all 1s */ |
237 | ld r5,VCPU_AMR(r4) | 271 | ld r5,VCPU_AMR(r4) |
@@ -925,12 +959,6 @@ BEGIN_FTR_SECTION | |||
925 | mtspr SPRN_AMR,r6 | 959 | mtspr SPRN_AMR,r6 |
926 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | 960 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
927 | 961 | ||
928 | /* Restore host DABR and DABRX */ | ||
929 | ld r5,HSTATE_DABR(r13) | ||
930 | li r6,7 | ||
931 | mtspr SPRN_DABR,r5 | ||
932 | mtspr SPRN_DABRX,r6 | ||
933 | |||
934 | /* Switch DSCR back to host value */ | 962 | /* Switch DSCR back to host value */ |
935 | BEGIN_FTR_SECTION | 963 | BEGIN_FTR_SECTION |
936 | mfspr r8, SPRN_DSCR | 964 | mfspr r8, SPRN_DSCR |
@@ -969,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
969 | std r5, VCPU_SPRG2(r9) | 997 | std r5, VCPU_SPRG2(r9) |
970 | std r6, VCPU_SPRG3(r9) | 998 | std r6, VCPU_SPRG3(r9) |
971 | 999 | ||
1000 | /* save FP state */ | ||
1001 | mr r3, r9 | ||
1002 | bl .kvmppc_save_fp | ||
1003 | |||
972 | /* Increment yield count if they have a VPA */ | 1004 | /* Increment yield count if they have a VPA */ |
973 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ | 1005 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ |
974 | cmpdi r8, 0 | 1006 | cmpdi r8, 0 |
@@ -983,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
983 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | 1015 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ |
984 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ | 1016 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ |
985 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ | 1017 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ |
1018 | mfspr r6, SPRN_MMCRA | ||
1019 | BEGIN_FTR_SECTION | ||
1020 | /* On P7, clear MMCRA in order to disable SDAR updates */ | ||
1021 | li r7, 0 | ||
1022 | mtspr SPRN_MMCRA, r7 | ||
1023 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||
986 | isync | 1024 | isync |
987 | beq 21f /* if no VPA, save PMU stuff anyway */ | 1025 | beq 21f /* if no VPA, save PMU stuff anyway */ |
988 | lbz r7, LPPACA_PMCINUSE(r8) | 1026 | lbz r7, LPPACA_PMCINUSE(r8) |
@@ -991,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
991 | std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ | 1029 | std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ |
992 | b 22f | 1030 | b 22f |
993 | 21: mfspr r5, SPRN_MMCR1 | 1031 | 21: mfspr r5, SPRN_MMCR1 |
994 | mfspr r6, SPRN_MMCRA | ||
995 | std r4, VCPU_MMCR(r9) | 1032 | std r4, VCPU_MMCR(r9) |
996 | std r5, VCPU_MMCR + 8(r9) | 1033 | std r5, VCPU_MMCR + 8(r9) |
997 | std r6, VCPU_MMCR + 16(r9) | 1034 | std r6, VCPU_MMCR + 16(r9) |
@@ -1016,17 +1053,20 @@ BEGIN_FTR_SECTION | |||
1016 | stw r11, VCPU_PMC + 28(r9) | 1053 | stw r11, VCPU_PMC + 28(r9) |
1017 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | 1054 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) |
1018 | 22: | 1055 | 22: |
1019 | /* save FP state */ | ||
1020 | mr r3, r9 | ||
1021 | bl .kvmppc_save_fp | ||
1022 | 1056 | ||
1023 | /* Secondary threads go off to take a nap on POWER7 */ | 1057 | /* Secondary threads go off to take a nap on POWER7 */ |
1024 | BEGIN_FTR_SECTION | 1058 | BEGIN_FTR_SECTION |
1025 | lwz r0,VCPU_PTID(r3) | 1059 | lwz r0,VCPU_PTID(r9) |
1026 | cmpwi r0,0 | 1060 | cmpwi r0,0 |
1027 | bne secondary_nap | 1061 | bne secondary_nap |
1028 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | 1062 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
1029 | 1063 | ||
1064 | /* Restore host DABR and DABRX */ | ||
1065 | ld r5,HSTATE_DABR(r13) | ||
1066 | li r6,7 | ||
1067 | mtspr SPRN_DABR,r5 | ||
1068 | mtspr SPRN_DABRX,r6 | ||
1069 | |||
1030 | /* | 1070 | /* |
1031 | * Reload DEC. HDEC interrupts were disabled when | 1071 | * Reload DEC. HDEC interrupts were disabled when |
1032 | * we reloaded the host's LPCR value. | 1072 | * we reloaded the host's LPCR value. |
@@ -1363,7 +1403,12 @@ bounce_ext_interrupt: | |||
1363 | 1403 | ||
1364 | _GLOBAL(kvmppc_h_set_dabr) | 1404 | _GLOBAL(kvmppc_h_set_dabr) |
1365 | std r4,VCPU_DABR(r3) | 1405 | std r4,VCPU_DABR(r3) |
1366 | mtspr SPRN_DABR,r4 | 1406 | /* Work around P7 bug where DABR can get corrupted on mtspr */ |
1407 | 1: mtspr SPRN_DABR,r4 | ||
1408 | mfspr r5, SPRN_DABR | ||
1409 | cmpd r4, r5 | ||
1410 | bne 1b | ||
1411 | isync | ||
1367 | li r3,0 | 1412 | li r3,0 |
1368 | blr | 1413 | blr |
1369 | 1414 | ||
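The kvmppc_h_set_dabr change above works around a POWER7 erratum by rewriting SPRN_DABR until a read-back matches the intended value. The same write-until-it-sticks loop in plain C, with a fake register and simulated corrupted writes standing in for the real mtspr/mfspr (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Fake register; a real implementation would use mtspr/mfspr on SPRN_DABR. */
static uint64_t fake_dabr;
static int glitches = 2;

static void write_dabr(uint64_t val)
{
	fake_dabr = (glitches-- > 0) ? (val ^ 1) : val;  /* corrupt the first writes */
}

static uint64_t read_dabr(void)
{
	return fake_dabr;
}

static void set_dabr_workaround(uint64_t val)
{
	do {
		write_dabr(val);                 /* write may be corrupted */
	} while (read_dabr() != val);            /* retry until the value sticks */
}

int main(void)
{
	set_dabr_workaround(0x1000);
	printf("DABR settled at 0x%llx\n", (unsigned long long)read_dabr());
	return 0;
}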
@@ -1445,8 +1490,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
1445 | * Take a nap until a decrementer or external interrupt occurs, | 1490 | * Take a nap until a decrementer or external interrupt occurs, |
1446 | * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR | 1491 | * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR |
1447 | */ | 1492 | */ |
1448 | li r0,0x80 | 1493 | li r0,1 |
1449 | stb r0,PACAPROCSTART(r13) | 1494 | stb r0,HSTATE_HWTHREAD_REQ(r13) |
1450 | mfspr r5,SPRN_LPCR | 1495 | mfspr r5,SPRN_LPCR |
1451 | ori r5,r5,LPCR_PECE0 | LPCR_PECE1 | 1496 | ori r5,r5,LPCR_PECE0 | LPCR_PECE1 |
1452 | mtspr SPRN_LPCR,r5 | 1497 | mtspr SPRN_LPCR,r5 |
@@ -1463,26 +1508,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
1463 | kvm_end_cede: | 1508 | kvm_end_cede: |
1464 | /* Woken by external or decrementer interrupt */ | 1509 | /* Woken by external or decrementer interrupt */ |
1465 | ld r1, HSTATE_HOST_R1(r13) | 1510 | ld r1, HSTATE_HOST_R1(r13) |
1466 | ld r2, PACATOC(r13) | ||
1467 | 1511 | ||
1468 | /* If we're a secondary thread and we got here by an IPI, ack it */ | ||
1469 | ld r4,HSTATE_KVM_VCPU(r13) | ||
1470 | lwz r3,VCPU_PTID(r4) | ||
1471 | cmpwi r3,0 | ||
1472 | beq 27f | ||
1473 | mfspr r3,SPRN_SRR1 | ||
1474 | rlwinm r3,r3,44-31,0x7 /* extract wake reason field */ | ||
1475 | cmpwi r3,4 /* was it an external interrupt? */ | ||
1476 | bne 27f | ||
1477 | ld r5, HSTATE_XICS_PHYS(r13) | ||
1478 | li r0,0xff | ||
1479 | li r6,XICS_QIRR | ||
1480 | li r7,XICS_XIRR | ||
1481 | lwzcix r8,r5,r7 /* ack the interrupt */ | ||
1482 | sync | ||
1483 | stbcix r0,r5,r6 /* clear it */ | ||
1484 | stwcix r8,r5,r7 /* EOI it */ | ||
1485 | 27: | ||
1486 | /* load up FP state */ | 1512 | /* load up FP state */ |
1487 | bl kvmppc_load_fp | 1513 | bl kvmppc_load_fp |
1488 | 1514 | ||
@@ -1580,12 +1606,17 @@ secondary_nap: | |||
1580 | stwcx. r3, 0, r4 | 1606 | stwcx. r3, 0, r4 |
1581 | bne 51b | 1607 | bne 51b |
1582 | 1608 | ||
1609 | kvm_no_guest: | ||
1610 | li r0, KVM_HWTHREAD_IN_NAP | ||
1611 | stb r0, HSTATE_HWTHREAD_STATE(r13) | ||
1612 | li r0, 0 | ||
1613 | std r0, HSTATE_KVM_VCPU(r13) | ||
1614 | |||
1583 | li r3, LPCR_PECE0 | 1615 | li r3, LPCR_PECE0 |
1584 | mfspr r4, SPRN_LPCR | 1616 | mfspr r4, SPRN_LPCR |
1585 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 | 1617 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 |
1586 | mtspr SPRN_LPCR, r4 | 1618 | mtspr SPRN_LPCR, r4 |
1587 | isync | 1619 | isync |
1588 | li r0, 0 | ||
1589 | std r0, HSTATE_SCRATCH0(r13) | 1620 | std r0, HSTATE_SCRATCH0(r13) |
1590 | ptesync | 1621 | ptesync |
1591 | ld r0, HSTATE_SCRATCH0(r13) | 1622 | ld r0, HSTATE_SCRATCH0(r13) |
@@ -1599,8 +1630,8 @@ secondary_nap: | |||
1599 | * r3 = vcpu pointer | 1630 | * r3 = vcpu pointer |
1600 | */ | 1631 | */ |
1601 | _GLOBAL(kvmppc_save_fp) | 1632 | _GLOBAL(kvmppc_save_fp) |
1602 | mfmsr r9 | 1633 | mfmsr r5 |
1603 | ori r8,r9,MSR_FP | 1634 | ori r8,r5,MSR_FP |
1604 | #ifdef CONFIG_ALTIVEC | 1635 | #ifdef CONFIG_ALTIVEC |
1605 | BEGIN_FTR_SECTION | 1636 | BEGIN_FTR_SECTION |
1606 | oris r8,r8,MSR_VEC@h | 1637 | oris r8,r8,MSR_VEC@h |
@@ -1649,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
1649 | #endif | 1680 | #endif |
1650 | mfspr r6,SPRN_VRSAVE | 1681 | mfspr r6,SPRN_VRSAVE |
1651 | stw r6,VCPU_VRSAVE(r3) | 1682 | stw r6,VCPU_VRSAVE(r3) |
1652 | mtmsrd r9 | 1683 | mtmsrd r5 |
1653 | isync | 1684 | isync |
1654 | blr | 1685 | blr |
1655 | 1686 | ||
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 7759053d391b..a1baec340f7e 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -120,6 +120,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | |||
120 | if (msr & MSR_POW) { | 120 | if (msr & MSR_POW) { |
121 | if (!vcpu->arch.pending_exceptions) { | 121 | if (!vcpu->arch.pending_exceptions) { |
122 | kvm_vcpu_block(vcpu); | 122 | kvm_vcpu_block(vcpu); |
123 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
123 | vcpu->stat.halt_wakeup++; | 124 | vcpu->stat.halt_wakeup++; |
124 | 125 | ||
125 | /* Unset POW bit after we woke up */ | 126 | /* Unset POW bit after we woke up */ |
@@ -144,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | |||
144 | } | 145 | } |
145 | } | 146 | } |
146 | 147 | ||
148 | /* | ||
149 | * When switching from 32 to 64-bit, we may have a stale 32-bit | ||
150 | * magic page around; we need to flush it. Typically a 32-bit magic | ||
151 | * page will be instantiated when calling into RTAS. Note: We | ||
152 | * assume that such a transition only happens while in kernel mode, | ||
153 | * i.e., we never transition from user 32-bit to kernel 64-bit with | ||
154 | * a 32-bit magic page around. | ||
155 | */ | ||
156 | if (vcpu->arch.magic_page_pa && | ||
157 | !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) { | ||
158 | /* going from RTAS to normal kernel code */ | ||
159 | kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, | ||
160 | ~0xFFFUL); | ||
161 | } | ||
162 | |||
147 | /* Preload FPU if it's enabled */ | 163 | /* Preload FPU if it's enabled */ |
148 | if (vcpu->arch.shared->msr & MSR_FP) | 164 | if (vcpu->arch.shared->msr & MSR_FP) |
149 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 165 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
@@ -251,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) | |||
251 | { | 267 | { |
252 | ulong mp_pa = vcpu->arch.magic_page_pa; | 268 | ulong mp_pa = vcpu->arch.magic_page_pa; |
253 | 269 | ||
270 | if (!(vcpu->arch.shared->msr & MSR_SF)) | ||
271 | mp_pa = (uint32_t)mp_pa; | ||
272 | |||
254 | if (unlikely(mp_pa) && | 273 | if (unlikely(mp_pa) && |
255 | unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { | 274 | unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { |
256 | return 1; | 275 | return 1; |
@@ -351,6 +370,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
351 | /* MMIO */ | 370 | /* MMIO */ |
352 | vcpu->stat.mmio_exits++; | 371 | vcpu->stat.mmio_exits++; |
353 | vcpu->arch.paddr_accessed = pte.raddr; | 372 | vcpu->arch.paddr_accessed = pte.raddr; |
373 | vcpu->arch.vaddr_accessed = pte.eaddr; | ||
354 | r = kvmppc_emulate_mmio(run, vcpu); | 374 | r = kvmppc_emulate_mmio(run, vcpu); |
355 | if ( r == RESUME_HOST_NV ) | 375 | if ( r == RESUME_HOST_NV ) |
356 | r = RESUME_HOST; | 376 | r = RESUME_HOST; |
@@ -528,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
528 | run->exit_reason = KVM_EXIT_UNKNOWN; | 548 | run->exit_reason = KVM_EXIT_UNKNOWN; |
529 | run->ready_for_interrupt_injection = 1; | 549 | run->ready_for_interrupt_injection = 1; |
530 | 550 | ||
551 | /* We get here with MSR.EE=0, so enable it to be a nice citizen */ | ||
552 | __hard_irq_enable(); | ||
553 | |||
531 | trace_kvm_book3s_exit(exit_nr, vcpu); | 554 | trace_kvm_book3s_exit(exit_nr, vcpu); |
532 | preempt_enable(); | 555 | preempt_enable(); |
533 | kvm_resched(vcpu); | 556 | kvm_resched(vcpu); |
@@ -617,10 +640,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
617 | break; | 640 | break; |
618 | /* We're good on these - the host merely wanted to get our attention */ | 641 | /* We're good on these - the host merely wanted to get our attention */ |
619 | case BOOK3S_INTERRUPT_DECREMENTER: | 642 | case BOOK3S_INTERRUPT_DECREMENTER: |
643 | case BOOK3S_INTERRUPT_HV_DECREMENTER: | ||
620 | vcpu->stat.dec_exits++; | 644 | vcpu->stat.dec_exits++; |
621 | r = RESUME_GUEST; | 645 | r = RESUME_GUEST; |
622 | break; | 646 | break; |
623 | case BOOK3S_INTERRUPT_EXTERNAL: | 647 | case BOOK3S_INTERRUPT_EXTERNAL: |
648 | case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: | ||
649 | case BOOK3S_INTERRUPT_EXTERNAL_HV: | ||
624 | vcpu->stat.ext_intr_exits++; | 650 | vcpu->stat.ext_intr_exits++; |
625 | r = RESUME_GUEST; | 651 | r = RESUME_GUEST; |
626 | break; | 652 | break; |
@@ -628,6 +654,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
628 | r = RESUME_GUEST; | 654 | r = RESUME_GUEST; |
629 | break; | 655 | break; |
630 | case BOOK3S_INTERRUPT_PROGRAM: | 656 | case BOOK3S_INTERRUPT_PROGRAM: |
657 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: | ||
631 | { | 658 | { |
632 | enum emulation_result er; | 659 | enum emulation_result er; |
633 | struct kvmppc_book3s_shadow_vcpu *svcpu; | 660 | struct kvmppc_book3s_shadow_vcpu *svcpu; |
@@ -1131,6 +1158,31 @@ out: | |||
1131 | return r; | 1158 | return r; |
1132 | } | 1159 | } |
1133 | 1160 | ||
1161 | #ifdef CONFIG_PPC64 | ||
1162 | int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) | ||
1163 | { | ||
1164 | /* No flags */ | ||
1165 | info->flags = 0; | ||
1166 | |||
1167 | /* SLB is always 64 entries */ | ||
1168 | info->slb_size = 64; | ||
1169 | |||
1170 | /* Standard 4k base page size segment */ | ||
1171 | info->sps[0].page_shift = 12; | ||
1172 | info->sps[0].slb_enc = 0; | ||
1173 | info->sps[0].enc[0].page_shift = 12; | ||
1174 | info->sps[0].enc[0].pte_enc = 0; | ||
1175 | |||
1176 | /* Standard 16M large page size segment */ | ||
1177 | info->sps[1].page_shift = 24; | ||
1178 | info->sps[1].slb_enc = SLB_VSID_L; | ||
1179 | info->sps[1].enc[0].page_shift = 24; | ||
1180 | info->sps[1].enc[0].pte_enc = 0; | ||
1181 | |||
1182 | return 0; | ||
1183 | } | ||
1184 | #endif /* CONFIG_PPC64 */ | ||
1185 | |||
1134 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, | 1186 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, |
1135 | struct kvm_userspace_memory_region *mem) | 1187 | struct kvm_userspace_memory_region *mem) |
1136 | { | 1188 | { |
@@ -1144,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, | |||
1144 | 1196 | ||
1145 | int kvmppc_core_init_vm(struct kvm *kvm) | 1197 | int kvmppc_core_init_vm(struct kvm *kvm) |
1146 | { | 1198 | { |
1199 | #ifdef CONFIG_PPC64 | ||
1200 | INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); | ||
1201 | #endif | ||
1202 | |||
1147 | return 0; | 1203 | return 0; |
1148 | } | 1204 | } |
1149 | 1205 | ||
1150 | void kvmppc_core_destroy_vm(struct kvm *kvm) | 1206 | void kvmppc_core_destroy_vm(struct kvm *kvm) |
1151 | { | 1207 | { |
1208 | #ifdef CONFIG_PPC64 | ||
1209 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); | ||
1210 | #endif | ||
1152 | } | 1211 | } |
1153 | 1212 | ||
1154 | static int kvmppc_book3s_init(void) | 1213 | static int kvmppc_book3s_init(void) |
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index b9589324797b..3ff9013d6e79 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c | |||
@@ -15,6 +15,8 @@ | |||
15 | * published by the Free Software Foundation. | 15 | * published by the Free Software Foundation. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/anon_inodes.h> | ||
19 | |||
18 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
19 | #include <asm/kvm_ppc.h> | 21 | #include <asm/kvm_ppc.h> |
20 | #include <asm/kvm_book3s.h> | 22 | #include <asm/kvm_book3s.h> |
@@ -98,6 +100,83 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) | |||
98 | return EMULATE_DONE; | 100 | return EMULATE_DONE; |
99 | } | 101 | } |
100 | 102 | ||
103 | /* Request defs for kvmppc_h_pr_bulk_remove() */ | ||
104 | #define H_BULK_REMOVE_TYPE 0xc000000000000000ULL | ||
105 | #define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL | ||
106 | #define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL | ||
107 | #define H_BULK_REMOVE_END 0xc000000000000000ULL | ||
108 | #define H_BULK_REMOVE_CODE 0x3000000000000000ULL | ||
109 | #define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL | ||
110 | #define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL | ||
111 | #define H_BULK_REMOVE_PARM 0x2000000000000000ULL | ||
112 | #define H_BULK_REMOVE_HW 0x3000000000000000ULL | ||
113 | #define H_BULK_REMOVE_RC 0x0c00000000000000ULL | ||
114 | #define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL | ||
115 | #define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL | ||
116 | #define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL | ||
117 | #define H_BULK_REMOVE_AVPN 0x0200000000000000ULL | ||
118 | #define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL | ||
119 | #define H_BULK_REMOVE_MAX_BATCH 4 | ||
120 | |||
121 | static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) | ||
122 | { | ||
123 | int i; | ||
124 | int paramnr = 4; | ||
125 | int ret = H_SUCCESS; | ||
126 | |||
127 | for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { | ||
128 | unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); | ||
129 | unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); | ||
130 | unsigned long pteg, rb, flags; | ||
131 | unsigned long pte[2]; | ||
132 | unsigned long v = 0; | ||
133 | |||
134 | if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) { | ||
135 | break; /* Exit success */ | ||
136 | } else if ((tsh & H_BULK_REMOVE_TYPE) != | ||
137 | H_BULK_REMOVE_REQUEST) { | ||
138 | ret = H_PARAMETER; | ||
139 | break; /* Exit fail */ | ||
140 | } | ||
141 | |||
142 | tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS; | ||
143 | tsh |= H_BULK_REMOVE_RESPONSE; | ||
144 | |||
145 | if ((tsh & H_BULK_REMOVE_ANDCOND) && | ||
146 | (tsh & H_BULK_REMOVE_AVPN)) { | ||
147 | tsh |= H_BULK_REMOVE_PARM; | ||
148 | kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); | ||
149 | ret = H_PARAMETER; | ||
150 | break; /* Exit fail */ | ||
151 | } | ||
152 | |||
153 | pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); | ||
154 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | ||
155 | |||
156 | /* tsl = AVPN */ | ||
157 | flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; | ||
158 | |||
159 | if ((pte[0] & HPTE_V_VALID) == 0 || | ||
160 | ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) || | ||
161 | ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) { | ||
162 | tsh |= H_BULK_REMOVE_NOT_FOUND; | ||
163 | } else { | ||
164 | /* Splat the pteg in (userland) hpt */ | ||
165 | copy_to_user((void __user *)pteg, &v, sizeof(v)); | ||
166 | |||
167 | rb = compute_tlbie_rb(pte[0], pte[1], | ||
168 | tsh & H_BULK_REMOVE_PTEX); | ||
169 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); | ||
170 | tsh |= H_BULK_REMOVE_SUCCESS; | ||
171 | tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43; | ||
172 | } | ||
173 | kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); | ||
174 | } | ||
175 | kvmppc_set_gpr(vcpu, 3, ret); | ||
176 | |||
177 | return EMULATE_DONE; | ||
178 | } | ||
179 | |||
101 | static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | 180 | static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) |
102 | { | 181 | { |
103 | unsigned long flags = kvmppc_get_gpr(vcpu, 4); | 182 | unsigned long flags = kvmppc_get_gpr(vcpu, 4); |
@@ -134,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
134 | return EMULATE_DONE; | 213 | return EMULATE_DONE; |
135 | } | 214 | } |
136 | 215 | ||
216 | static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) | ||
217 | { | ||
218 | unsigned long liobn = kvmppc_get_gpr(vcpu, 4); | ||
219 | unsigned long ioba = kvmppc_get_gpr(vcpu, 5); | ||
220 | unsigned long tce = kvmppc_get_gpr(vcpu, 6); | ||
221 | long rc; | ||
222 | |||
223 | rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce); | ||
224 | if (rc == H_TOO_HARD) | ||
225 | return EMULATE_FAIL; | ||
226 | kvmppc_set_gpr(vcpu, 3, rc); | ||
227 | return EMULATE_DONE; | ||
228 | } | ||
229 | |||
137 | int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) | 230 | int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) |
138 | { | 231 | { |
139 | switch (cmd) { | 232 | switch (cmd) { |
@@ -144,12 +237,12 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) | |||
144 | case H_PROTECT: | 237 | case H_PROTECT: |
145 | return kvmppc_h_pr_protect(vcpu); | 238 | return kvmppc_h_pr_protect(vcpu); |
146 | case H_BULK_REMOVE: | 239 | case H_BULK_REMOVE: |
147 | /* We just flush all PTEs, so user space can | 240 | return kvmppc_h_pr_bulk_remove(vcpu); |
148 | handle the HPT modifications */ | 241 | case H_PUT_TCE: |
149 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | 242 | return kvmppc_h_pr_put_tce(vcpu); |
150 | break; | ||
151 | case H_CEDE: | 243 | case H_CEDE: |
152 | kvm_vcpu_block(vcpu); | 244 | kvm_vcpu_block(vcpu); |
245 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
153 | vcpu->stat.halt_wakeup++; | 246 | vcpu->stat.halt_wakeup++; |
154 | return EMULATE_DONE; | 247 | return EMULATE_DONE; |
155 | } | 248 | } |
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 6e6e9cef34a8..798491a268b3 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S | |||
@@ -128,24 +128,25 @@ no_dcbz32_on: | |||
128 | /* First clear RI in our current MSR value */ | 128 | /* First clear RI in our current MSR value */ |
129 | li r0, MSR_RI | 129 | li r0, MSR_RI |
130 | andc r6, r6, r0 | 130 | andc r6, r6, r0 |
131 | MTMSR_EERI(r6) | ||
132 | mtsrr0 r9 | ||
133 | mtsrr1 r4 | ||
134 | 131 | ||
135 | PPC_LL r0, SVCPU_R0(r3) | 132 | PPC_LL r0, SVCPU_R0(r3) |
136 | PPC_LL r1, SVCPU_R1(r3) | 133 | PPC_LL r1, SVCPU_R1(r3) |
137 | PPC_LL r2, SVCPU_R2(r3) | 134 | PPC_LL r2, SVCPU_R2(r3) |
138 | PPC_LL r4, SVCPU_R4(r3) | ||
139 | PPC_LL r5, SVCPU_R5(r3) | 135 | PPC_LL r5, SVCPU_R5(r3) |
140 | PPC_LL r6, SVCPU_R6(r3) | ||
141 | PPC_LL r7, SVCPU_R7(r3) | 136 | PPC_LL r7, SVCPU_R7(r3) |
142 | PPC_LL r8, SVCPU_R8(r3) | 137 | PPC_LL r8, SVCPU_R8(r3) |
143 | PPC_LL r9, SVCPU_R9(r3) | ||
144 | PPC_LL r10, SVCPU_R10(r3) | 138 | PPC_LL r10, SVCPU_R10(r3) |
145 | PPC_LL r11, SVCPU_R11(r3) | 139 | PPC_LL r11, SVCPU_R11(r3) |
146 | PPC_LL r12, SVCPU_R12(r3) | 140 | PPC_LL r12, SVCPU_R12(r3) |
147 | PPC_LL r13, SVCPU_R13(r3) | 141 | PPC_LL r13, SVCPU_R13(r3) |
148 | 142 | ||
143 | MTMSR_EERI(r6) | ||
144 | mtsrr0 r9 | ||
145 | mtsrr1 r4 | ||
146 | |||
147 | PPC_LL r4, SVCPU_R4(r3) | ||
148 | PPC_LL r6, SVCPU_R6(r3) | ||
149 | PPC_LL r9, SVCPU_R9(r3) | ||
149 | PPC_LL r3, (SVCPU_R3)(r3) | 150 | PPC_LL r3, (SVCPU_R3)(r3) |
150 | 151 | ||
151 | RFI | 152 | RFI |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index ee9e1ee9c858..72f13f4a06e0 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -17,6 +17,8 @@ | |||
17 | * | 17 | * |
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
19 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | 19 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> |
20 | * Scott Wood <scottwood@freescale.com> | ||
21 | * Varun Sethi <varun.sethi@freescale.com> | ||
20 | */ | 22 | */ |
21 | 23 | ||
22 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
@@ -30,9 +32,12 @@ | |||
30 | #include <asm/cputable.h> | 32 | #include <asm/cputable.h> |
31 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
32 | #include <asm/kvm_ppc.h> | 34 | #include <asm/kvm_ppc.h> |
33 | #include "timing.h" | ||
34 | #include <asm/cacheflush.h> | 35 | #include <asm/cacheflush.h> |
36 | #include <asm/dbell.h> | ||
37 | #include <asm/hw_irq.h> | ||
38 | #include <asm/irq.h> | ||
35 | 39 | ||
40 | #include "timing.h" | ||
36 | #include "booke.h" | 41 | #include "booke.h" |
37 | 42 | ||
38 | unsigned long kvmppc_booke_handlers; | 43 | unsigned long kvmppc_booke_handlers; |
@@ -55,6 +60,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
55 | { "dec", VCPU_STAT(dec_exits) }, | 60 | { "dec", VCPU_STAT(dec_exits) }, |
56 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, | 61 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, |
57 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 62 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
63 | { "doorbell", VCPU_STAT(dbell_exits) }, | ||
64 | { "guest doorbell", VCPU_STAT(gdbell_exits) }, | ||
58 | { NULL } | 65 | { NULL } |
59 | }; | 66 | }; |
60 | 67 | ||
@@ -121,6 +128,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | |||
121 | { | 128 | { |
122 | u32 old_msr = vcpu->arch.shared->msr; | 129 | u32 old_msr = vcpu->arch.shared->msr; |
123 | 130 | ||
131 | #ifdef CONFIG_KVM_BOOKE_HV | ||
132 | new_msr |= MSR_GS; | ||
133 | #endif | ||
134 | |||
124 | vcpu->arch.shared->msr = new_msr; | 135 | vcpu->arch.shared->msr = new_msr; |
125 | 136 | ||
126 | kvmppc_mmu_msr_notify(vcpu, old_msr); | 137 | kvmppc_mmu_msr_notify(vcpu, old_msr); |
@@ -195,17 +206,87 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, | |||
195 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); | 206 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); |
196 | } | 207 | } |
197 | 208 | ||
209 | static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
210 | { | ||
211 | #ifdef CONFIG_KVM_BOOKE_HV | ||
212 | mtspr(SPRN_GSRR0, srr0); | ||
213 | mtspr(SPRN_GSRR1, srr1); | ||
214 | #else | ||
215 | vcpu->arch.shared->srr0 = srr0; | ||
216 | vcpu->arch.shared->srr1 = srr1; | ||
217 | #endif | ||
218 | } | ||
219 | |||
220 | static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
221 | { | ||
222 | vcpu->arch.csrr0 = srr0; | ||
223 | vcpu->arch.csrr1 = srr1; | ||
224 | } | ||
225 | |||
226 | static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
227 | { | ||
228 | if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) { | ||
229 | vcpu->arch.dsrr0 = srr0; | ||
230 | vcpu->arch.dsrr1 = srr1; | ||
231 | } else { | ||
232 | set_guest_csrr(vcpu, srr0, srr1); | ||
233 | } | ||
234 | } | ||
235 | |||
236 | static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
237 | { | ||
238 | vcpu->arch.mcsrr0 = srr0; | ||
239 | vcpu->arch.mcsrr1 = srr1; | ||
240 | } | ||
241 | |||
242 | static unsigned long get_guest_dear(struct kvm_vcpu *vcpu) | ||
243 | { | ||
244 | #ifdef CONFIG_KVM_BOOKE_HV | ||
245 | return mfspr(SPRN_GDEAR); | ||
246 | #else | ||
247 | return vcpu->arch.shared->dar; | ||
248 | #endif | ||
249 | } | ||
250 | |||
251 | static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear) | ||
252 | { | ||
253 | #ifdef CONFIG_KVM_BOOKE_HV | ||
254 | mtspr(SPRN_GDEAR, dear); | ||
255 | #else | ||
256 | vcpu->arch.shared->dar = dear; | ||
257 | #endif | ||
258 | } | ||
259 | |||
260 | static unsigned long get_guest_esr(struct kvm_vcpu *vcpu) | ||
261 | { | ||
262 | #ifdef CONFIG_KVM_BOOKE_HV | ||
263 | return mfspr(SPRN_GESR); | ||
264 | #else | ||
265 | return vcpu->arch.shared->esr; | ||
266 | #endif | ||
267 | } | ||
268 | |||
269 | static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr) | ||
270 | { | ||
271 | #ifdef CONFIG_KVM_BOOKE_HV | ||
272 | mtspr(SPRN_GESR, esr); | ||
273 | #else | ||
274 | vcpu->arch.shared->esr = esr; | ||
275 | #endif | ||
276 | } | ||
277 | |||
198 | /* Deliver the interrupt of the corresponding priority, if possible. */ | 278 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
199 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | 279 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, |
200 | unsigned int priority) | 280 | unsigned int priority) |
201 | { | 281 | { |
202 | int allowed = 0; | 282 | int allowed = 0; |
203 | ulong uninitialized_var(msr_mask); | 283 | ulong msr_mask = 0; |
204 | bool update_esr = false, update_dear = false; | 284 | bool update_esr = false, update_dear = false; |
205 | ulong crit_raw = vcpu->arch.shared->critical; | 285 | ulong crit_raw = vcpu->arch.shared->critical; |
206 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); | 286 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); |
207 | bool crit; | 287 | bool crit; |
208 | bool keep_irq = false; | 288 | bool keep_irq = false; |
289 | enum int_class int_class; | ||
209 | 290 | ||
210 | /* Truncate crit indicators in 32 bit mode */ | 291 | /* Truncate crit indicators in 32 bit mode */ |
211 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | 292 | if (!(vcpu->arch.shared->msr & MSR_SF)) { |
@@ -241,46 +322,85 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
241 | case BOOKE_IRQPRIO_AP_UNAVAIL: | 322 | case BOOKE_IRQPRIO_AP_UNAVAIL: |
242 | case BOOKE_IRQPRIO_ALIGNMENT: | 323 | case BOOKE_IRQPRIO_ALIGNMENT: |
243 | allowed = 1; | 324 | allowed = 1; |
244 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 325 | msr_mask = MSR_CE | MSR_ME | MSR_DE; |
326 | int_class = INT_CLASS_NONCRIT; | ||
245 | break; | 327 | break; |
246 | case BOOKE_IRQPRIO_CRITICAL: | 328 | case BOOKE_IRQPRIO_CRITICAL: |
247 | case BOOKE_IRQPRIO_WATCHDOG: | 329 | case BOOKE_IRQPRIO_DBELL_CRIT: |
248 | allowed = vcpu->arch.shared->msr & MSR_CE; | 330 | allowed = vcpu->arch.shared->msr & MSR_CE; |
331 | allowed = allowed && !crit; | ||
249 | msr_mask = MSR_ME; | 332 | msr_mask = MSR_ME; |
333 | int_class = INT_CLASS_CRIT; | ||
250 | break; | 334 | break; |
251 | case BOOKE_IRQPRIO_MACHINE_CHECK: | 335 | case BOOKE_IRQPRIO_MACHINE_CHECK: |
252 | allowed = vcpu->arch.shared->msr & MSR_ME; | 336 | allowed = vcpu->arch.shared->msr & MSR_ME; |
253 | msr_mask = 0; | 337 | allowed = allowed && !crit; |
338 | int_class = INT_CLASS_MC; | ||
254 | break; | 339 | break; |
255 | case BOOKE_IRQPRIO_DECREMENTER: | 340 | case BOOKE_IRQPRIO_DECREMENTER: |
256 | case BOOKE_IRQPRIO_FIT: | 341 | case BOOKE_IRQPRIO_FIT: |
257 | keep_irq = true; | 342 | keep_irq = true; |
258 | /* fall through */ | 343 | /* fall through */ |
259 | case BOOKE_IRQPRIO_EXTERNAL: | 344 | case BOOKE_IRQPRIO_EXTERNAL: |
345 | case BOOKE_IRQPRIO_DBELL: | ||
260 | allowed = vcpu->arch.shared->msr & MSR_EE; | 346 | allowed = vcpu->arch.shared->msr & MSR_EE; |
261 | allowed = allowed && !crit; | 347 | allowed = allowed && !crit; |
262 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 348 | msr_mask = MSR_CE | MSR_ME | MSR_DE; |
349 | int_class = INT_CLASS_NONCRIT; | ||
263 | break; | 350 | break; |
264 | case BOOKE_IRQPRIO_DEBUG: | 351 | case BOOKE_IRQPRIO_DEBUG: |
265 | allowed = vcpu->arch.shared->msr & MSR_DE; | 352 | allowed = vcpu->arch.shared->msr & MSR_DE; |
353 | allowed = allowed && !crit; | ||
266 | msr_mask = MSR_ME; | 354 | msr_mask = MSR_ME; |
355 | int_class = INT_CLASS_CRIT; | ||
267 | break; | 356 | break; |
268 | } | 357 | } |
269 | 358 | ||
270 | if (allowed) { | 359 | if (allowed) { |
271 | vcpu->arch.shared->srr0 = vcpu->arch.pc; | 360 | switch (int_class) { |
272 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; | 361 | case INT_CLASS_NONCRIT: |
362 | set_guest_srr(vcpu, vcpu->arch.pc, | ||
363 | vcpu->arch.shared->msr); | ||
364 | break; | ||
365 | case INT_CLASS_CRIT: | ||
366 | set_guest_csrr(vcpu, vcpu->arch.pc, | ||
367 | vcpu->arch.shared->msr); | ||
368 | break; | ||
369 | case INT_CLASS_DBG: | ||
370 | set_guest_dsrr(vcpu, vcpu->arch.pc, | ||
371 | vcpu->arch.shared->msr); | ||
372 | break; | ||
373 | case INT_CLASS_MC: | ||
374 | set_guest_mcsrr(vcpu, vcpu->arch.pc, | ||
375 | vcpu->arch.shared->msr); | ||
376 | break; | ||
377 | } | ||
378 | |||
273 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 379 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
274 | if (update_esr == true) | 380 | if (update_esr == true) |
275 | vcpu->arch.shared->esr = vcpu->arch.queued_esr; | 381 | set_guest_esr(vcpu, vcpu->arch.queued_esr); |
276 | if (update_dear == true) | 382 | if (update_dear == true) |
277 | vcpu->arch.shared->dar = vcpu->arch.queued_dear; | 383 | set_guest_dear(vcpu, vcpu->arch.queued_dear); |
278 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); | 384 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); |
279 | 385 | ||
280 | if (!keep_irq) | 386 | if (!keep_irq) |
281 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 387 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
282 | } | 388 | } |
283 | 389 | ||
390 | #ifdef CONFIG_KVM_BOOKE_HV | ||
391 | /* | ||
392 | * If an interrupt is pending but masked, raise a guest doorbell | ||
393 | * so that we are notified when the guest enables the relevant | ||
394 | * MSR bit. | ||
395 | */ | ||
396 | if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE) | ||
397 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT); | ||
398 | if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE) | ||
399 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT); | ||
400 | if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK) | ||
401 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC); | ||
402 | #endif | ||
403 | |||
284 | return allowed; | 404 | return allowed; |
285 | } | 405 | } |
286 | 406 | ||
@@ -305,7 +425,7 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) | |||
305 | } | 425 | } |
306 | 426 | ||
307 | priority = __ffs(*pending); | 427 | priority = __ffs(*pending); |
308 | while (priority <= BOOKE_IRQPRIO_MAX) { | 428 | while (priority < BOOKE_IRQPRIO_MAX) { |
309 | if (kvmppc_booke_irqprio_deliver(vcpu, priority)) | 429 | if (kvmppc_booke_irqprio_deliver(vcpu, priority)) |
310 | break; | 430 | break; |
311 | 431 | ||
@@ -319,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) | |||
319 | } | 439 | } |
320 | 440 | ||
321 | /* Check pending exceptions and deliver one, if possible. */ | 441 | /* Check pending exceptions and deliver one, if possible. */ |
322 | void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | 442 | int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) |
323 | { | 443 | { |
444 | int r = 0; | ||
324 | WARN_ON_ONCE(!irqs_disabled()); | 445 | WARN_ON_ONCE(!irqs_disabled()); |
325 | 446 | ||
326 | kvmppc_core_check_exceptions(vcpu); | 447 | kvmppc_core_check_exceptions(vcpu); |
@@ -328,16 +449,60 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | |||
328 | if (vcpu->arch.shared->msr & MSR_WE) { | 449 | if (vcpu->arch.shared->msr & MSR_WE) { |
329 | local_irq_enable(); | 450 | local_irq_enable(); |
330 | kvm_vcpu_block(vcpu); | 451 | kvm_vcpu_block(vcpu); |
452 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
331 | local_irq_disable(); | 453 | local_irq_disable(); |
332 | 454 | ||
333 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); | 455 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); |
334 | kvmppc_core_check_exceptions(vcpu); | 456 | r = 1; |
335 | }; | 457 | }; |
458 | |||
459 | return r; | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * Common checks before entering the guest world. Call with interrupts | ||
464 | * disabled. | ||
465 | * | ||
466 | * returns !0 if a signal is pending and check_signal is true | ||
467 | */ | ||
468 | static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) | ||
469 | { | ||
470 | int r = 0; | ||
471 | |||
472 | WARN_ON_ONCE(!irqs_disabled()); | ||
473 | while (true) { | ||
474 | if (need_resched()) { | ||
475 | local_irq_enable(); | ||
476 | cond_resched(); | ||
477 | local_irq_disable(); | ||
478 | continue; | ||
479 | } | ||
480 | |||
481 | if (signal_pending(current)) { | ||
482 | r = 1; | ||
483 | break; | ||
484 | } | ||
485 | |||
486 | if (kvmppc_core_prepare_to_enter(vcpu)) { | ||
487 | /* interrupts got enabled in between, so we | ||
488 | are back at square 1 */ | ||
489 | continue; | ||
490 | } | ||
491 | |||
492 | break; | ||
493 | } | ||
494 | |||
495 | return r; | ||
336 | } | 496 | } |
337 | 497 | ||
338 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 498 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
339 | { | 499 | { |
340 | int ret; | 500 | int ret; |
501 | #ifdef CONFIG_PPC_FPU | ||
502 | unsigned int fpscr; | ||
503 | int fpexc_mode; | ||
504 | u64 fpr[32]; | ||
505 | #endif | ||
341 | 506 | ||
342 | if (!vcpu->arch.sane) { | 507 | if (!vcpu->arch.sane) { |
343 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 508 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
@@ -345,17 +510,53 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
345 | } | 510 | } |
346 | 511 | ||
347 | local_irq_disable(); | 512 | local_irq_disable(); |
348 | 513 | if (kvmppc_prepare_to_enter(vcpu)) { | |
349 | kvmppc_core_prepare_to_enter(vcpu); | ||
350 | |||
351 | if (signal_pending(current)) { | ||
352 | kvm_run->exit_reason = KVM_EXIT_INTR; | 514 | kvm_run->exit_reason = KVM_EXIT_INTR; |
353 | ret = -EINTR; | 515 | ret = -EINTR; |
354 | goto out; | 516 | goto out; |
355 | } | 517 | } |
356 | 518 | ||
357 | kvm_guest_enter(); | 519 | kvm_guest_enter(); |
520 | |||
521 | #ifdef CONFIG_PPC_FPU | ||
522 | /* Save userspace FPU state in stack */ | ||
523 | enable_kernel_fp(); | ||
524 | memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); | ||
525 | fpscr = current->thread.fpscr.val; | ||
526 | fpexc_mode = current->thread.fpexc_mode; | ||
527 | |||
528 | /* Restore guest FPU state to thread */ | ||
529 | memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr)); | ||
530 | current->thread.fpscr.val = vcpu->arch.fpscr; | ||
531 | |||
532 | /* | ||
533 | * Since we can't trap on MSR_FP in GS-mode, we consider the guest | ||
534 | * as always using the FPU. Kernel usage of FP (via | ||
535 | * enable_kernel_fp()) in this thread must not occur while | ||
536 | * vcpu->fpu_active is set. | ||
537 | */ | ||
538 | vcpu->fpu_active = 1; | ||
539 | |||
540 | kvmppc_load_guest_fp(vcpu); | ||
541 | #endif | ||
542 | |||
358 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); | 543 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); |
544 | |||
545 | #ifdef CONFIG_PPC_FPU | ||
546 | kvmppc_save_guest_fp(vcpu); | ||
547 | |||
548 | vcpu->fpu_active = 0; | ||
549 | |||
550 | /* Save guest FPU state from thread */ | ||
551 | memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr)); | ||
552 | vcpu->arch.fpscr = current->thread.fpscr.val; | ||
553 | |||
554 | /* Restore userspace FPU state from stack */ | ||
555 | memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); | ||
556 | current->thread.fpscr.val = fpscr; | ||
557 | current->thread.fpexc_mode = fpexc_mode; | ||
558 | #endif | ||
559 | |||
359 | kvm_guest_exit(); | 560 | kvm_guest_exit(); |
360 | 561 | ||
361 | out: | 562 | out: |
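The CONFIG_PPC_FPU block above swaps FPU state in a strict order: save the userspace FP context on the stack, load the guest context into the thread, run the guest, then reverse both copies. A stripped-down sketch of that ordering with plain structs standing in for current->thread and vcpu->arch (illustrative only):

#include <stdio.h>

struct fp_state {
	double fpr[32];
	unsigned int fpscr;
};

static struct fp_state thread_fp;   /* stands in for current->thread      */
static struct fp_state guest_fp;    /* stands in for vcpu->arch FP fields */

static void run_guest(void)
{
	thread_fp.fpr[0] += 1.0;         /* the guest uses the FPU via the thread */
}

int main(void)
{
	struct fp_state saved;

	saved = thread_fp;               /* save userspace FPU state on the stack */
	thread_fp = guest_fp;            /* load guest FPU state into the thread  */
	run_guest();
	guest_fp = thread_fp;            /* save guest FPU state back             */
	thread_fp = saved;               /* restore userspace FPU state           */

	printf("guest fpr0 = %.1f\n", guest_fp.fpr[0]);
	return 0;
}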
@@ -363,6 +564,84 @@ out: | |||
363 | return ret; | 564 | return ret; |
364 | } | 565 | } |
365 | 566 | ||
567 | static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | ||
568 | { | ||
569 | enum emulation_result er; | ||
570 | |||
571 | er = kvmppc_emulate_instruction(run, vcpu); | ||
572 | switch (er) { | ||
573 | case EMULATE_DONE: | ||
574 | /* don't overwrite subtypes, just account kvm_stats */ | ||
575 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
576 | /* Future optimization: only reload non-volatiles if | ||
577 | * they were actually modified by emulation. */ | ||
578 | return RESUME_GUEST_NV; | ||
579 | |||
580 | case EMULATE_DO_DCR: | ||
581 | run->exit_reason = KVM_EXIT_DCR; | ||
582 | return RESUME_HOST; | ||
583 | |||
584 | case EMULATE_FAIL: | ||
585 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | ||
586 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | ||
587 | /* For debugging, encode the failing instruction and | ||
588 | * report it to userspace. */ | ||
589 | run->hw.hardware_exit_reason = ~0ULL << 32; | ||
590 | run->hw.hardware_exit_reason |= vcpu->arch.last_inst; | ||
591 | kvmppc_core_queue_program(vcpu, ESR_PIL); | ||
592 | return RESUME_HOST; | ||
593 | |||
594 | default: | ||
595 | BUG(); | ||
596 | } | ||
597 | } | ||
598 | |||
599 | static void kvmppc_fill_pt_regs(struct pt_regs *regs) | ||
600 | { | ||
601 | ulong r1, ip, msr, lr; | ||
602 | |||
603 | asm("mr %0, 1" : "=r"(r1)); | ||
604 | asm("mflr %0" : "=r"(lr)); | ||
605 | asm("mfmsr %0" : "=r"(msr)); | ||
606 | asm("bl 1f; 1: mflr %0" : "=r"(ip)); | ||
607 | |||
608 | memset(regs, 0, sizeof(*regs)); | ||
609 | regs->gpr[1] = r1; | ||
610 | regs->nip = ip; | ||
611 | regs->msr = msr; | ||
612 | regs->link = lr; | ||
613 | } | ||
614 | |||
615 | static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, | ||
616 | unsigned int exit_nr) | ||
617 | { | ||
618 | struct pt_regs regs; | ||
619 | |||
620 | switch (exit_nr) { | ||
621 | case BOOKE_INTERRUPT_EXTERNAL: | ||
622 | kvmppc_fill_pt_regs(®s); | ||
623 | do_IRQ(®s); | ||
624 | break; | ||
625 | case BOOKE_INTERRUPT_DECREMENTER: | ||
626 | kvmppc_fill_pt_regs(®s); | ||
627 | timer_interrupt(®s); | ||
628 | break; | ||
629 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64) | ||
630 | case BOOKE_INTERRUPT_DOORBELL: | ||
631 | kvmppc_fill_pt_regs(®s); | ||
632 | doorbell_exception(®s); | ||
633 | break; | ||
634 | #endif | ||
635 | case BOOKE_INTERRUPT_MACHINE_CHECK: | ||
636 | /* FIXME */ | ||
637 | break; | ||
638 | case BOOKE_INTERRUPT_PERFORMANCE_MONITOR: | ||
639 | kvmppc_fill_pt_regs(®s); | ||
640 | performance_monitor_exception(®s); | ||
641 | break; | ||
642 | } | ||
643 | } | ||
644 | |||
366 | /** | 645 | /** |
367 | * kvmppc_handle_exit | 646 | * kvmppc_handle_exit |
368 | * | 647 | * |
@@ -371,12 +650,14 @@ out: | |||
371 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | 650 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, |
372 | unsigned int exit_nr) | 651 | unsigned int exit_nr) |
373 | { | 652 | { |
374 | enum emulation_result er; | ||
375 | int r = RESUME_HOST; | 653 | int r = RESUME_HOST; |
376 | 654 | ||
377 | /* update before a new last_exit_type is rewritten */ | 655 | /* update before a new last_exit_type is rewritten */ |
378 | kvmppc_update_timing_stats(vcpu); | 656 | kvmppc_update_timing_stats(vcpu); |
379 | 657 | ||
658 | /* restart interrupts if they were meant for the host */ | ||
659 | kvmppc_restart_interrupt(vcpu, exit_nr); | ||
660 | |||
380 | local_irq_enable(); | 661 | local_irq_enable(); |
381 | 662 | ||
382 | run->exit_reason = KVM_EXIT_UNKNOWN; | 663 | run->exit_reason = KVM_EXIT_UNKNOWN; |
@@ -386,62 +667,74 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
386 | case BOOKE_INTERRUPT_MACHINE_CHECK: | 667 | case BOOKE_INTERRUPT_MACHINE_CHECK: |
387 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); | 668 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); |
388 | kvmppc_dump_vcpu(vcpu); | 669 | kvmppc_dump_vcpu(vcpu); |
670 | /* For debugging, send invalid exit reason to user space */ | ||
671 | run->hw.hardware_exit_reason = ~1ULL << 32; | ||
672 | run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR); | ||
389 | r = RESUME_HOST; | 673 | r = RESUME_HOST; |
390 | break; | 674 | break; |
391 | 675 | ||
392 | case BOOKE_INTERRUPT_EXTERNAL: | 676 | case BOOKE_INTERRUPT_EXTERNAL: |
393 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); | 677 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); |
394 | if (need_resched()) | ||
395 | cond_resched(); | ||
396 | r = RESUME_GUEST; | 678 | r = RESUME_GUEST; |
397 | break; | 679 | break; |
398 | 680 | ||
399 | case BOOKE_INTERRUPT_DECREMENTER: | 681 | case BOOKE_INTERRUPT_DECREMENTER: |
400 | /* Since we switched IVPR back to the host's value, the host | ||
401 | * handled this interrupt the moment we enabled interrupts. | ||
402 | * Now we just offer it a chance to reschedule the guest. */ | ||
403 | kvmppc_account_exit(vcpu, DEC_EXITS); | 682 | kvmppc_account_exit(vcpu, DEC_EXITS); |
404 | if (need_resched()) | ||
405 | cond_resched(); | ||
406 | r = RESUME_GUEST; | 683 | r = RESUME_GUEST; |
407 | break; | 684 | break; |
408 | 685 | ||
686 | case BOOKE_INTERRUPT_DOORBELL: | ||
687 | kvmppc_account_exit(vcpu, DBELL_EXITS); | ||
688 | r = RESUME_GUEST; | ||
689 | break; | ||
690 | |||
691 | case BOOKE_INTERRUPT_GUEST_DBELL_CRIT: | ||
692 | kvmppc_account_exit(vcpu, GDBELL_EXITS); | ||
693 | |||
694 | /* | ||
695 | * We are here because there is a pending guest interrupt | ||
696 | * which could not be delivered as MSR_CE or MSR_ME was not | ||
697 | * set. Once we break from here we will retry delivery. | ||
698 | */ | ||
699 | r = RESUME_GUEST; | ||
700 | break; | ||
701 | |||
702 | case BOOKE_INTERRUPT_GUEST_DBELL: | ||
703 | kvmppc_account_exit(vcpu, GDBELL_EXITS); | ||
704 | |||
705 | /* | ||
706 | * We are here because there is a pending guest interrupt | ||
707 | * which could not be delivered as MSR_EE was not set. Once | ||
708 | * we break from here we will retry delivery. | ||
709 | */ | ||
710 | r = RESUME_GUEST; | ||
711 | break; | ||
712 | |||
713 | case BOOKE_INTERRUPT_PERFORMANCE_MONITOR: | ||
714 | r = RESUME_GUEST; | ||
715 | break; | ||
716 | |||
717 | case BOOKE_INTERRUPT_HV_PRIV: | ||
718 | r = emulation_exit(run, vcpu); | ||
719 | break; | ||
720 | |||
409 | case BOOKE_INTERRUPT_PROGRAM: | 721 | case BOOKE_INTERRUPT_PROGRAM: |
410 | if (vcpu->arch.shared->msr & MSR_PR) { | 722 | if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) { |
411 | /* Program traps generated by user-level software must be handled | 723 | /* |
412 | * by the guest kernel. */ | 724 | * Program traps generated by user-level software must |
725 | * be handled by the guest kernel. | ||
726 | * | ||
727 | * In GS mode, hypervisor privileged instructions trap | ||
728 | * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are | ||
729 | * actual program interrupts, handled by the guest. | ||
730 | */ | ||
413 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); | 731 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
414 | r = RESUME_GUEST; | 732 | r = RESUME_GUEST; |
415 | kvmppc_account_exit(vcpu, USR_PR_INST); | 733 | kvmppc_account_exit(vcpu, USR_PR_INST); |
416 | break; | 734 | break; |
417 | } | 735 | } |
418 | 736 | ||
419 | er = kvmppc_emulate_instruction(run, vcpu); | 737 | r = emulation_exit(run, vcpu); |
420 | switch (er) { | ||
421 | case EMULATE_DONE: | ||
422 | /* don't overwrite subtypes, just account kvm_stats */ | ||
423 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
424 | /* Future optimization: only reload non-volatiles if | ||
425 | * they were actually modified by emulation. */ | ||
426 | r = RESUME_GUEST_NV; | ||
427 | break; | ||
428 | case EMULATE_DO_DCR: | ||
429 | run->exit_reason = KVM_EXIT_DCR; | ||
430 | r = RESUME_HOST; | ||
431 | break; | ||
432 | case EMULATE_FAIL: | ||
433 | /* XXX Deliver Program interrupt to guest. */ | ||
434 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | ||
435 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | ||
436 | /* For debugging, encode the failing instruction and | ||
437 | * report it to userspace. */ | ||
438 | run->hw.hardware_exit_reason = ~0ULL << 32; | ||
439 | run->hw.hardware_exit_reason |= vcpu->arch.last_inst; | ||
440 | r = RESUME_HOST; | ||
441 | break; | ||
442 | default: | ||
443 | BUG(); | ||
444 | } | ||
445 | break; | 738 | break; |
446 | 739 | ||
447 | case BOOKE_INTERRUPT_FP_UNAVAIL: | 740 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
@@ -506,6 +799,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
506 | r = RESUME_GUEST; | 799 | r = RESUME_GUEST; |
507 | break; | 800 | break; |
508 | 801 | ||
802 | #ifdef CONFIG_KVM_BOOKE_HV | ||
803 | case BOOKE_INTERRUPT_HV_SYSCALL: | ||
804 | if (!(vcpu->arch.shared->msr & MSR_PR)) { | ||
805 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | ||
806 | } else { | ||
807 | /* | ||
808 | * hcall from guest userspace -- send privileged | ||
809 | * instruction program check. | ||
810 | */ | ||
811 | kvmppc_core_queue_program(vcpu, ESR_PPR); | ||
812 | } | ||
813 | |||
814 | r = RESUME_GUEST; | ||
815 | break; | ||
816 | #else | ||
509 | case BOOKE_INTERRUPT_SYSCALL: | 817 | case BOOKE_INTERRUPT_SYSCALL: |
510 | if (!(vcpu->arch.shared->msr & MSR_PR) && | 818 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
511 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { | 819 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { |
@@ -519,6 +827,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
519 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); | 827 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); |
520 | r = RESUME_GUEST; | 828 | r = RESUME_GUEST; |
521 | break; | 829 | break; |
830 | #endif | ||
522 | 831 | ||
523 | case BOOKE_INTERRUPT_DTLB_MISS: { | 832 | case BOOKE_INTERRUPT_DTLB_MISS: { |
524 | unsigned long eaddr = vcpu->arch.fault_dear; | 833 | unsigned long eaddr = vcpu->arch.fault_dear; |
@@ -526,7 +835,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
526 | gpa_t gpaddr; | 835 | gpa_t gpaddr; |
527 | gfn_t gfn; | 836 | gfn_t gfn; |
528 | 837 | ||
529 | #ifdef CONFIG_KVM_E500 | 838 | #ifdef CONFIG_KVM_E500V2 |
530 | if (!(vcpu->arch.shared->msr & MSR_PR) && | 839 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
531 | (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { | 840 | (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { |
532 | kvmppc_map_magic(vcpu); | 841 | kvmppc_map_magic(vcpu); |
@@ -567,6 +876,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
567 | /* Guest has mapped and accessed a page which is not | 876 | /* Guest has mapped and accessed a page which is not |
568 | * actually RAM. */ | 877 | * actually RAM. */ |
569 | vcpu->arch.paddr_accessed = gpaddr; | 878 | vcpu->arch.paddr_accessed = gpaddr; |
879 | vcpu->arch.vaddr_accessed = eaddr; | ||
570 | r = kvmppc_emulate_mmio(run, vcpu); | 880 | r = kvmppc_emulate_mmio(run, vcpu); |
571 | kvmppc_account_exit(vcpu, MMIO_EXITS); | 881 | kvmppc_account_exit(vcpu, MMIO_EXITS); |
572 | } | 882 | } |
@@ -634,15 +944,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
634 | BUG(); | 944 | BUG(); |
635 | } | 945 | } |
636 | 946 | ||
637 | local_irq_disable(); | 947 | /* |
638 | 948 | * To avoid clobbering exit_reason, only check for signals if we | |
639 | kvmppc_core_prepare_to_enter(vcpu); | 949 | * aren't already exiting to userspace for some other reason. |
640 | 950 | */ | |
641 | if (!(r & RESUME_HOST)) { | 951 | if (!(r & RESUME_HOST)) { |
642 | /* To avoid clobbering exit_reason, only check for signals if | 952 | local_irq_disable(); |
643 | * we aren't already exiting to userspace for some other | 953 | if (kvmppc_prepare_to_enter(vcpu)) { |
644 | * reason. */ | ||
645 | if (signal_pending(current)) { | ||
646 | run->exit_reason = KVM_EXIT_INTR; | 954 | run->exit_reason = KVM_EXIT_INTR; |
647 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); | 955 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); |
648 | kvmppc_account_exit(vcpu, SIGNAL_EXITS); | 956 | kvmppc_account_exit(vcpu, SIGNAL_EXITS); |
@@ -659,12 +967,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
659 | int r; | 967 | int r; |
660 | 968 | ||
661 | vcpu->arch.pc = 0; | 969 | vcpu->arch.pc = 0; |
662 | vcpu->arch.shared->msr = 0; | ||
663 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; | ||
664 | vcpu->arch.shared->pir = vcpu->vcpu_id; | 970 | vcpu->arch.shared->pir = vcpu->vcpu_id; |
665 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ | 971 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
972 | kvmppc_set_msr(vcpu, 0); | ||
666 | 973 | ||
974 | #ifndef CONFIG_KVM_BOOKE_HV | ||
975 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; | ||
667 | vcpu->arch.shadow_pid = 1; | 976 | vcpu->arch.shadow_pid = 1; |
977 | vcpu->arch.shared->msr = 0; | ||
978 | #endif | ||
668 | 979 | ||
669 | /* Eye-catching numbers so we know if the guest takes an interrupt | 980 | /* Eye-catching numbers so we know if the guest takes an interrupt |
670 | * before it's programmed its own IVPR/IVORs. */ | 981 | * before it's programmed its own IVPR/IVORs. */ |
@@ -745,8 +1056,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu, | |||
745 | sregs->u.e.csrr0 = vcpu->arch.csrr0; | 1056 | sregs->u.e.csrr0 = vcpu->arch.csrr0; |
746 | sregs->u.e.csrr1 = vcpu->arch.csrr1; | 1057 | sregs->u.e.csrr1 = vcpu->arch.csrr1; |
747 | sregs->u.e.mcsr = vcpu->arch.mcsr; | 1058 | sregs->u.e.mcsr = vcpu->arch.mcsr; |
748 | sregs->u.e.esr = vcpu->arch.shared->esr; | 1059 | sregs->u.e.esr = get_guest_esr(vcpu); |
749 | sregs->u.e.dear = vcpu->arch.shared->dar; | 1060 | sregs->u.e.dear = get_guest_dear(vcpu); |
750 | sregs->u.e.tsr = vcpu->arch.tsr; | 1061 | sregs->u.e.tsr = vcpu->arch.tsr; |
751 | sregs->u.e.tcr = vcpu->arch.tcr; | 1062 | sregs->u.e.tcr = vcpu->arch.tcr; |
752 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); | 1063 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); |
@@ -763,8 +1074,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu, | |||
763 | vcpu->arch.csrr0 = sregs->u.e.csrr0; | 1074 | vcpu->arch.csrr0 = sregs->u.e.csrr0; |
764 | vcpu->arch.csrr1 = sregs->u.e.csrr1; | 1075 | vcpu->arch.csrr1 = sregs->u.e.csrr1; |
765 | vcpu->arch.mcsr = sregs->u.e.mcsr; | 1076 | vcpu->arch.mcsr = sregs->u.e.mcsr; |
766 | vcpu->arch.shared->esr = sregs->u.e.esr; | 1077 | set_guest_esr(vcpu, sregs->u.e.esr); |
767 | vcpu->arch.shared->dar = sregs->u.e.dear; | 1078 | set_guest_dear(vcpu, sregs->u.e.dear); |
768 | vcpu->arch.vrsave = sregs->u.e.vrsave; | 1079 | vcpu->arch.vrsave = sregs->u.e.vrsave; |
769 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); | 1080 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); |
770 | 1081 | ||
@@ -932,15 +1243,6 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, | |||
932 | { | 1243 | { |
933 | } | 1244 | } |
934 | 1245 | ||
935 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
936 | { | ||
937 | return 0; | ||
938 | } | ||
939 | |||
940 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
941 | { | ||
942 | } | ||
943 | |||
944 | void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) | 1246 | void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) |
945 | { | 1247 | { |
946 | vcpu->arch.tcr = new_tcr; | 1248 | vcpu->arch.tcr = new_tcr; |
@@ -968,8 +1270,19 @@ void kvmppc_decrementer_func(unsigned long data) | |||
968 | kvmppc_set_tsr_bits(vcpu, TSR_DIS); | 1270 | kvmppc_set_tsr_bits(vcpu, TSR_DIS); |
969 | } | 1271 | } |
970 | 1272 | ||
1273 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
1274 | { | ||
1275 | current->thread.kvm_vcpu = vcpu; | ||
1276 | } | ||
1277 | |||
1278 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) | ||
1279 | { | ||
1280 | current->thread.kvm_vcpu = NULL; | ||
1281 | } | ||
1282 | |||
971 | int __init kvmppc_booke_init(void) | 1283 | int __init kvmppc_booke_init(void) |
972 | { | 1284 | { |
1285 | #ifndef CONFIG_KVM_BOOKE_HV | ||
973 | unsigned long ivor[16]; | 1286 | unsigned long ivor[16]; |
974 | unsigned long max_ivor = 0; | 1287 | unsigned long max_ivor = 0; |
975 | int i; | 1288 | int i; |
@@ -1012,7 +1325,7 @@ int __init kvmppc_booke_init(void) | |||
1012 | } | 1325 | } |
1013 | flush_icache_range(kvmppc_booke_handlers, | 1326 | flush_icache_range(kvmppc_booke_handlers, |
1014 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); | 1327 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); |
1015 | 1328 | #endif /* !BOOKE_HV */ | |
1016 | return 0; | 1329 | return 0; |
1017 | } | 1330 | } |
1018 | 1331 | ||
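Reviewer note, not part of the patch: the EMULATE_FAIL path (and the machine-check path) above report debugging information to userspace by packing a marker into the upper 32 bits of run->hw.hardware_exit_reason and the failing instruction (or MCSR) into the lower 32 bits. A hypothetical userspace-side sketch of unpacking that value; the helper name is made up and only the EMULATE_FAIL encoding is shown:

	#include <stdint.h>
	#include <stdio.h>
	#include <linux/kvm.h>

	/* Illustrative only: decode the value set by the EMULATE_FAIL path
	 * above (~0ULL << 32 in the upper word, last_inst in the lower word). */
	static void dump_emulation_failure(struct kvm_run *run)
	{
		uint64_t reason = run->hw.hardware_exit_reason;
		uint32_t inst = (uint32_t)reason;

		if ((reason >> 32) == 0xffffffffu)
			fprintf(stderr, "emulation failed, inst=0x%08x\n", inst);
	}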
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 2fe202705a3f..ba61974c1e20 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <asm/kvm_ppc.h> | 25 | #include <asm/kvm_ppc.h> |
26 | #include <asm/switch_to.h> | ||
26 | #include "timing.h" | 27 | #include "timing.h" |
27 | 28 | ||
28 | /* interrupt priority ordering */ | 29 |
@@ -48,7 +49,20 @@ | |||
48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 | 49 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 |
49 | /* Internal pseudo-irqprio for level triggered externals */ | 50 | /* Internal pseudo-irqprio for level triggered externals */ |
50 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 | 51 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 |
51 | #define BOOKE_IRQPRIO_MAX 20 | 52 | #define BOOKE_IRQPRIO_DBELL 21 |
53 | #define BOOKE_IRQPRIO_DBELL_CRIT 22 | ||
54 | #define BOOKE_IRQPRIO_MAX 23 | ||
55 | |||
56 | #define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \ | ||
57 | (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \ | ||
58 | (1 << BOOKE_IRQPRIO_DBELL) | \ | ||
59 | (1 << BOOKE_IRQPRIO_DECREMENTER) | \ | ||
60 | (1 << BOOKE_IRQPRIO_FIT) | \ | ||
61 | (1 << BOOKE_IRQPRIO_EXTERNAL)) | ||
62 | |||
63 | #define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \ | ||
64 | (1 << BOOKE_IRQPRIO_WATCHDOG) | \ | ||
65 | (1 << BOOKE_IRQPRIO_CRITICAL)) | ||
52 | 66 | ||
53 | extern unsigned long kvmppc_booke_handlers; | 67 | extern unsigned long kvmppc_booke_handlers; |
54 | 68 | ||
@@ -61,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); | |||
61 | 75 | ||
62 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 76 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, |
63 | unsigned int inst, int *advance); | 77 | unsigned int inst, int *advance); |
64 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); | 78 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); |
65 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); | 79 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); |
66 | 80 | ||
67 | /* low-level asm code to transfer guest state */ | 81 | /* low-level asm code to transfer guest state */ |
68 | void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu); | 82 | void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu); |
@@ -71,4 +85,46 @@ void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu); | |||
71 | /* high-level function, manages flags, host state */ | 85 | /* high-level function, manages flags, host state */ |
72 | void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); | 86 | void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); |
73 | 87 | ||
88 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | ||
89 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu); | ||
90 | |||
91 | enum int_class { | ||
92 | INT_CLASS_NONCRIT, | ||
93 | INT_CLASS_CRIT, | ||
94 | INT_CLASS_MC, | ||
95 | INT_CLASS_DBG, | ||
96 | }; | ||
97 | |||
98 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); | ||
99 | |||
100 | /* | ||
101 | * Load up guest vcpu FP state if it's needed. | ||
102 | * It also sets MSR_FP in the thread so that the host knows | ||
103 | * we are holding the FPU, and the host can then save the | ||
104 | * guest vcpu FP state if other threads need to use the FPU. | ||
105 | * This simulates an FP unavailable fault. | ||
106 | * | ||
107 | * It must be called with preemption disabled. | ||
108 | */ | ||
109 | static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu) | ||
110 | { | ||
111 | #ifdef CONFIG_PPC_FPU | ||
112 | if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) { | ||
113 | load_up_fpu(); | ||
114 | current->thread.regs->msr |= MSR_FP; | ||
115 | } | ||
116 | #endif | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Save guest vcpu FP state into the thread. | ||
121 | * It must be called with preemption disabled. | ||
122 | */ | ||
123 | static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu) | ||
124 | { | ||
125 | #ifdef CONFIG_PPC_FPU | ||
126 | if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP)) | ||
127 | giveup_fpu(current); | ||
128 | #endif | ||
129 | } | ||
74 | #endif /* __KVM_BOOKE_H__ */ | 130 | #endif /* __KVM_BOOKE_H__ */ |
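Reviewer note, not part of the patch: the two inline FP helpers above assume the calling pattern introduced in kvmppc_vcpu_run() earlier in this patch: with preemption disabled, guest FP state is staged in current->thread, fpu_active is set, and the lazy load/save pair brackets the low-level run loop. A condensed sketch of that contract; the wrapper function name is hypothetical:

	/* Condensed view of the contract kvmppc_load_guest_fp() and
	 * kvmppc_save_guest_fp() rely on; see the kvmppc_vcpu_run() hunk
	 * earlier in this patch for the real code. */
	static int run_guest_with_fp(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	{
		int ret;

	#ifdef CONFIG_PPC_FPU
		vcpu->fpu_active = 1;		/* guest always "owns" FP in GS-mode */
		kvmppc_load_guest_fp(vcpu);	/* preemption must stay disabled */
	#endif

		ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	#ifdef CONFIG_PPC_FPU
		kvmppc_save_guest_fp(vcpu);	/* flush live FP state back to thread */
		vcpu->fpu_active = 0;
	#endif

		return ret;
	}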
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 3e652da36534..6c76397f2af4 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c | |||
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
40 | unsigned int inst, int *advance) | 40 | unsigned int inst, int *advance) |
41 | { | 41 | { |
42 | int emulated = EMULATE_DONE; | 42 | int emulated = EMULATE_DONE; |
43 | int rs; | 43 | int rs = get_rs(inst); |
44 | int rt; | 44 | int rt = get_rt(inst); |
45 | 45 | ||
46 | switch (get_op(inst)) { | 46 | switch (get_op(inst)) { |
47 | case 19: | 47 | case 19: |
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
62 | switch (get_xop(inst)) { | 62 | switch (get_xop(inst)) { |
63 | 63 | ||
64 | case OP_31_XOP_MFMSR: | 64 | case OP_31_XOP_MFMSR: |
65 | rt = get_rt(inst); | ||
66 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); | 65 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); |
67 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | 66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); |
68 | break; | 67 | break; |
69 | 68 | ||
70 | case OP_31_XOP_MTMSR: | 69 | case OP_31_XOP_MTMSR: |
71 | rs = get_rs(inst); | ||
72 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); | 70 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); |
73 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); | 71 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); |
74 | break; | 72 | break; |
75 | 73 | ||
76 | case OP_31_XOP_WRTEE: | 74 | case OP_31_XOP_WRTEE: |
77 | rs = get_rs(inst); | ||
78 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) | 75 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) |
79 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); | 76 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); |
80 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 77 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
@@ -99,22 +96,32 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
99 | return emulated; | 96 | return emulated; |
100 | } | 97 | } |
101 | 98 | ||
102 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 99 | /* |
100 | * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode). | ||
101 | * Their backing store is in real registers, and these functions | ||
102 | * will return the wrong result if called for them in another context | ||
103 | * (such as debugging). | ||
104 | */ | ||
105 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | ||
103 | { | 106 | { |
104 | int emulated = EMULATE_DONE; | 107 | int emulated = EMULATE_DONE; |
105 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
106 | 108 | ||
107 | switch (sprn) { | 109 | switch (sprn) { |
108 | case SPRN_DEAR: | 110 | case SPRN_DEAR: |
109 | vcpu->arch.shared->dar = spr_val; break; | 111 | vcpu->arch.shared->dar = spr_val; |
112 | break; | ||
110 | case SPRN_ESR: | 113 | case SPRN_ESR: |
111 | vcpu->arch.shared->esr = spr_val; break; | 114 | vcpu->arch.shared->esr = spr_val; |
115 | break; | ||
112 | case SPRN_DBCR0: | 116 | case SPRN_DBCR0: |
113 | vcpu->arch.dbcr0 = spr_val; break; | 117 | vcpu->arch.dbcr0 = spr_val; |
118 | break; | ||
114 | case SPRN_DBCR1: | 119 | case SPRN_DBCR1: |
115 | vcpu->arch.dbcr1 = spr_val; break; | 120 | vcpu->arch.dbcr1 = spr_val; |
121 | break; | ||
116 | case SPRN_DBSR: | 122 | case SPRN_DBSR: |
117 | vcpu->arch.dbsr &= ~spr_val; break; | 123 | vcpu->arch.dbsr &= ~spr_val; |
124 | break; | ||
118 | case SPRN_TSR: | 125 | case SPRN_TSR: |
119 | kvmppc_clr_tsr_bits(vcpu, spr_val); | 126 | kvmppc_clr_tsr_bits(vcpu, spr_val); |
120 | break; | 127 | break; |
@@ -122,20 +129,29 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
122 | kvmppc_set_tcr(vcpu, spr_val); | 129 | kvmppc_set_tcr(vcpu, spr_val); |
123 | break; | 130 | break; |
124 | 131 | ||
125 | /* Note: SPRG4-7 are user-readable. These values are | 132 | /* |
126 | * loaded into the real SPRGs when resuming the | 133 | * Note: SPRG4-7 are user-readable. |
127 | * guest. */ | 134 | * These values are loaded into the real SPRGs when resuming the |
135 | * guest (PR-mode only). | ||
136 | */ | ||
128 | case SPRN_SPRG4: | 137 | case SPRN_SPRG4: |
129 | vcpu->arch.shared->sprg4 = spr_val; break; | 138 | vcpu->arch.shared->sprg4 = spr_val; |
139 | break; | ||
130 | case SPRN_SPRG5: | 140 | case SPRN_SPRG5: |
131 | vcpu->arch.shared->sprg5 = spr_val; break; | 141 | vcpu->arch.shared->sprg5 = spr_val; |
142 | break; | ||
132 | case SPRN_SPRG6: | 143 | case SPRN_SPRG6: |
133 | vcpu->arch.shared->sprg6 = spr_val; break; | 144 | vcpu->arch.shared->sprg6 = spr_val; |
145 | break; | ||
134 | case SPRN_SPRG7: | 146 | case SPRN_SPRG7: |
135 | vcpu->arch.shared->sprg7 = spr_val; break; | 147 | vcpu->arch.shared->sprg7 = spr_val; |
148 | break; | ||
136 | 149 | ||
137 | case SPRN_IVPR: | 150 | case SPRN_IVPR: |
138 | vcpu->arch.ivpr = spr_val; | 151 | vcpu->arch.ivpr = spr_val; |
152 | #ifdef CONFIG_KVM_BOOKE_HV | ||
153 | mtspr(SPRN_GIVPR, spr_val); | ||
154 | #endif | ||
139 | break; | 155 | break; |
140 | case SPRN_IVOR0: | 156 | case SPRN_IVOR0: |
141 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; | 157 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; |
@@ -145,6 +161,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
145 | break; | 161 | break; |
146 | case SPRN_IVOR2: | 162 | case SPRN_IVOR2: |
147 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; | 163 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; |
164 | #ifdef CONFIG_KVM_BOOKE_HV | ||
165 | mtspr(SPRN_GIVOR2, spr_val); | ||
166 | #endif | ||
148 | break; | 167 | break; |
149 | case SPRN_IVOR3: | 168 | case SPRN_IVOR3: |
150 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; | 169 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; |
@@ -163,6 +182,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
163 | break; | 182 | break; |
164 | case SPRN_IVOR8: | 183 | case SPRN_IVOR8: |
165 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; | 184 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; |
185 | #ifdef CONFIG_KVM_BOOKE_HV | ||
186 | mtspr(SPRN_GIVOR8, spr_val); | ||
187 | #endif | ||
166 | break; | 188 | break; |
167 | case SPRN_IVOR9: | 189 | case SPRN_IVOR9: |
168 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; | 190 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; |
@@ -193,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
193 | return emulated; | 215 | return emulated; |
194 | } | 216 | } |
195 | 217 | ||
196 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 218 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
197 | { | 219 | { |
198 | int emulated = EMULATE_DONE; | 220 | int emulated = EMULATE_DONE; |
199 | 221 | ||
200 | switch (sprn) { | 222 | switch (sprn) { |
201 | case SPRN_IVPR: | 223 | case SPRN_IVPR: |
202 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; | 224 | *spr_val = vcpu->arch.ivpr; |
225 | break; | ||
203 | case SPRN_DEAR: | 226 | case SPRN_DEAR: |
204 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; | 227 | *spr_val = vcpu->arch.shared->dar; |
228 | break; | ||
205 | case SPRN_ESR: | 229 | case SPRN_ESR: |
206 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break; | 230 | *spr_val = vcpu->arch.shared->esr; |
231 | break; | ||
207 | case SPRN_DBCR0: | 232 | case SPRN_DBCR0: |
208 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; | 233 | *spr_val = vcpu->arch.dbcr0; |
234 | break; | ||
209 | case SPRN_DBCR1: | 235 | case SPRN_DBCR1: |
210 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; | 236 | *spr_val = vcpu->arch.dbcr1; |
237 | break; | ||
211 | case SPRN_DBSR: | 238 | case SPRN_DBSR: |
212 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; | 239 | *spr_val = vcpu->arch.dbsr; |
240 | break; | ||
213 | case SPRN_TSR: | 241 | case SPRN_TSR: |
214 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break; | 242 | *spr_val = vcpu->arch.tsr; |
243 | break; | ||
215 | case SPRN_TCR: | 244 | case SPRN_TCR: |
216 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break; | 245 | *spr_val = vcpu->arch.tcr; |
246 | break; | ||
217 | 247 | ||
218 | case SPRN_IVOR0: | 248 | case SPRN_IVOR0: |
219 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); | 249 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; |
220 | break; | 250 | break; |
221 | case SPRN_IVOR1: | 251 | case SPRN_IVOR1: |
222 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]); | 252 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; |
223 | break; | 253 | break; |
224 | case SPRN_IVOR2: | 254 | case SPRN_IVOR2: |
225 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); | 255 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; |
226 | break; | 256 | break; |
227 | case SPRN_IVOR3: | 257 | case SPRN_IVOR3: |
228 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]); | 258 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; |
229 | break; | 259 | break; |
230 | case SPRN_IVOR4: | 260 | case SPRN_IVOR4: |
231 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); | 261 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; |
232 | break; | 262 | break; |
233 | case SPRN_IVOR5: | 263 | case SPRN_IVOR5: |
234 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); | 264 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; |
235 | break; | 265 | break; |
236 | case SPRN_IVOR6: | 266 | case SPRN_IVOR6: |
237 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); | 267 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; |
238 | break; | 268 | break; |
239 | case SPRN_IVOR7: | 269 | case SPRN_IVOR7: |
240 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); | 270 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; |
241 | break; | 271 | break; |
242 | case SPRN_IVOR8: | 272 | case SPRN_IVOR8: |
243 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); | 273 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; |
244 | break; | 274 | break; |
245 | case SPRN_IVOR9: | 275 | case SPRN_IVOR9: |
246 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); | 276 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; |
247 | break; | 277 | break; |
248 | case SPRN_IVOR10: | 278 | case SPRN_IVOR10: |
249 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); | 279 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; |
250 | break; | 280 | break; |
251 | case SPRN_IVOR11: | 281 | case SPRN_IVOR11: |
252 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); | 282 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; |
253 | break; | 283 | break; |
254 | case SPRN_IVOR12: | 284 | case SPRN_IVOR12: |
255 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); | 285 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; |
256 | break; | 286 | break; |
257 | case SPRN_IVOR13: | 287 | case SPRN_IVOR13: |
258 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); | 288 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; |
259 | break; | 289 | break; |
260 | case SPRN_IVOR14: | 290 | case SPRN_IVOR14: |
261 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); | 291 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; |
262 | break; | 292 | break; |
263 | case SPRN_IVOR15: | 293 | case SPRN_IVOR15: |
264 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); | 294 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; |
265 | break; | 295 | break; |
266 | 296 | ||
267 | default: | 297 | default: |
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index c8c4b878795a..8feec2ff3928 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -419,13 +419,13 @@ lightweight_exit: | |||
419 | * written directly to the shared area, so we | 419 | * written directly to the shared area, so we |
420 | * need to reload them here with the guest's values. | 420 | * need to reload them here with the guest's values. |
421 | */ | 421 | */ |
422 | lwz r3, VCPU_SHARED_SPRG4(r5) | 422 | PPC_LD(r3, VCPU_SHARED_SPRG4, r5) |
423 | mtspr SPRN_SPRG4W, r3 | 423 | mtspr SPRN_SPRG4W, r3 |
424 | lwz r3, VCPU_SHARED_SPRG5(r5) | 424 | PPC_LD(r3, VCPU_SHARED_SPRG5, r5) |
425 | mtspr SPRN_SPRG5W, r3 | 425 | mtspr SPRN_SPRG5W, r3 |
426 | lwz r3, VCPU_SHARED_SPRG6(r5) | 426 | PPC_LD(r3, VCPU_SHARED_SPRG6, r5) |
427 | mtspr SPRN_SPRG6W, r3 | 427 | mtspr SPRN_SPRG6W, r3 |
428 | lwz r3, VCPU_SHARED_SPRG7(r5) | 428 | PPC_LD(r3, VCPU_SHARED_SPRG7, r5) |
429 | mtspr SPRN_SPRG7W, r3 | 429 | mtspr SPRN_SPRG7W, r3 |
430 | 430 | ||
431 | #ifdef CONFIG_KVM_EXIT_TIMING | 431 | #ifdef CONFIG_KVM_EXIT_TIMING |
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S new file mode 100644 index 000000000000..6048a00515d7 --- /dev/null +++ b/arch/powerpc/kvm/bookehv_interrupts.S | |||
@@ -0,0 +1,597 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. | ||
16 | * | ||
17 | * Author: Varun Sethi <varun.sethi@freescale.com> | ||
18 | * Author: Scott Wood <scotwood@freescale.com> | ||
19 | * | ||
20 | * This file is derived from arch/powerpc/kvm/booke_interrupts.S | ||
21 | */ | ||
22 | |||
23 | #include <asm/ppc_asm.h> | ||
24 | #include <asm/kvm_asm.h> | ||
25 | #include <asm/reg.h> | ||
26 | #include <asm/mmu-44x.h> | ||
27 | #include <asm/page.h> | ||
28 | #include <asm/asm-compat.h> | ||
29 | #include <asm/asm-offsets.h> | ||
30 | #include <asm/bitsperlong.h> | ||
31 | #include <asm/thread_info.h> | ||
32 | |||
33 | #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ | ||
34 | |||
35 | #define GET_VCPU(vcpu, thread) \ | ||
36 | PPC_LL vcpu, THREAD_KVM_VCPU(thread) | ||
37 | |||
38 | #define LONGBYTES (BITS_PER_LONG / 8) | ||
39 | |||
40 | #define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES)) | ||
41 | #define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES)) | ||
42 | |||
43 | /* The host stack layout: */ | ||
44 | #define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */ | ||
45 | #define HOST_CALLEE_LR (1 * LONGBYTES) | ||
46 | #define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */ | ||
47 | /* | ||
48 | * r2 is special: it holds 'current', and it is made nonvolatile in the | ||
49 | * kernel with the -ffixed-r2 gcc option. | ||
50 | */ | ||
51 | #define HOST_R2 (3 * LONGBYTES) | ||
52 | #define HOST_CR (4 * LONGBYTES) | ||
53 | #define HOST_NV_GPRS (5 * LONGBYTES) | ||
54 | #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES)) | ||
55 | #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES) | ||
56 | #define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */ | ||
57 | #define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */ | ||
58 | |||
59 | #define NEED_EMU 0x00000001 /* emulation -- save nv regs */ | ||
60 | #define NEED_DEAR 0x00000002 /* save faulting DEAR */ | ||
61 | #define NEED_ESR 0x00000004 /* save faulting ESR */ | ||
62 | |||
63 | /* | ||
64 | * On entry: | ||
65 | * r4 = vcpu, r5 = srr0, r6 = srr1 | ||
66 | * saved in vcpu: cr, ctr, r3-r13 | ||
67 | */ | ||
68 | .macro kvm_handler_common intno, srr0, flags | ||
69 | /* Restore host stack pointer */ | ||
70 | PPC_STL r1, VCPU_GPR(r1)(r4) | ||
71 | PPC_STL r2, VCPU_GPR(r2)(r4) | ||
72 | PPC_LL r1, VCPU_HOST_STACK(r4) | ||
73 | PPC_LL r2, HOST_R2(r1) | ||
74 | |||
75 | mfspr r10, SPRN_PID | ||
76 | lwz r8, VCPU_HOST_PID(r4) | ||
77 | PPC_LL r11, VCPU_SHARED(r4) | ||
78 | PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */ | ||
79 | li r14, \intno | ||
80 | |||
81 | stw r10, VCPU_GUEST_PID(r4) | ||
82 | mtspr SPRN_PID, r8 | ||
83 | |||
84 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
85 | /* save exit time */ | ||
86 | 1: mfspr r7, SPRN_TBRU | ||
87 | mfspr r8, SPRN_TBRL | ||
88 | mfspr r9, SPRN_TBRU | ||
89 | cmpw r9, r7 | ||
90 | stw r8, VCPU_TIMING_EXIT_TBL(r4) | ||
91 | bne- 1b | ||
92 | stw r9, VCPU_TIMING_EXIT_TBU(r4) | ||
93 | #endif | ||
94 | |||
95 | oris r8, r6, MSR_CE@h | ||
96 | PPC_STD(r6, VCPU_SHARED_MSR, r11) | ||
97 | ori r8, r8, MSR_ME | MSR_RI | ||
98 | PPC_STL r5, VCPU_PC(r4) | ||
99 | |||
100 | /* | ||
101 | * Make sure CE/ME/RI are set (if appropriate for exception type) | ||
102 | * whether or not the guest had it set. Since mfmsr/mtmsr are | ||
103 | * somewhat expensive, skip in the common case where the guest | ||
104 | * had all these bits set (and thus they're still set if | ||
105 | * appropriate for the exception type). | ||
106 | */ | ||
107 | cmpw r6, r8 | ||
108 | beq 1f | ||
109 | mfmsr r7 | ||
110 | .if \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0 | ||
111 | oris r7, r7, MSR_CE@h | ||
112 | .endif | ||
113 | .if \srr0 != SPRN_MCSRR0 | ||
114 | ori r7, r7, MSR_ME | MSR_RI | ||
115 | .endif | ||
116 | mtmsr r7 | ||
117 | 1: | ||
118 | |||
119 | .if \flags & NEED_EMU | ||
120 | /* | ||
121 | * This assumes you have external PID support. | ||
122 | * To support a bookehv CPU without external PID, you'll | ||
123 | * need to look up the TLB entry and create a temporary mapping. | ||
124 | * | ||
125 | * FIXME: we don't currently handle if the lwepx faults. PR-mode | ||
126 | * booke doesn't handle it either. Since Linux doesn't use | ||
127 | * broadcast tlbivax anymore, the only way this should happen is | ||
128 | * if the guest maps its memory execute-but-not-read, or if we | ||
129 | * somehow take a TLB miss in the middle of this entry code and | ||
130 | * evict the relevant entry. On e500mc, all kernel lowmem is | ||
131 | * bolted into TLB1 large page mappings, and we don't use | ||
132 | * broadcast invalidates, so we should not take a TLB miss here. | ||
133 | * | ||
134 | * Later we'll need to deal with faults here. Disallowing guest | ||
135 | * mappings that are execute-but-not-read could be an option on | ||
136 | * e500mc, but not on chips with an LRAT if it is used. | ||
137 | */ | ||
138 | |||
139 | mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ | ||
140 | PPC_STL r15, VCPU_GPR(r15)(r4) | ||
141 | PPC_STL r16, VCPU_GPR(r16)(r4) | ||
142 | PPC_STL r17, VCPU_GPR(r17)(r4) | ||
143 | PPC_STL r18, VCPU_GPR(r18)(r4) | ||
144 | PPC_STL r19, VCPU_GPR(r19)(r4) | ||
145 | mr r8, r3 | ||
146 | PPC_STL r20, VCPU_GPR(r20)(r4) | ||
147 | rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS | ||
148 | PPC_STL r21, VCPU_GPR(r21)(r4) | ||
149 | rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR | ||
150 | PPC_STL r22, VCPU_GPR(r22)(r4) | ||
151 | rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID | ||
152 | PPC_STL r23, VCPU_GPR(r23)(r4) | ||
153 | PPC_STL r24, VCPU_GPR(r24)(r4) | ||
154 | PPC_STL r25, VCPU_GPR(r25)(r4) | ||
155 | PPC_STL r26, VCPU_GPR(r26)(r4) | ||
156 | PPC_STL r27, VCPU_GPR(r27)(r4) | ||
157 | PPC_STL r28, VCPU_GPR(r28)(r4) | ||
158 | PPC_STL r29, VCPU_GPR(r29)(r4) | ||
159 | PPC_STL r30, VCPU_GPR(r30)(r4) | ||
160 | PPC_STL r31, VCPU_GPR(r31)(r4) | ||
161 | mtspr SPRN_EPLC, r8 | ||
162 | |||
163 | /* disable preemption, so we are sure we hit the fixup handler */ | ||
164 | #ifdef CONFIG_PPC64 | ||
165 | clrrdi r8,r1,THREAD_SHIFT | ||
166 | #else | ||
167 | rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */ | ||
168 | #endif | ||
169 | li r7, 1 | ||
170 | stw r7, TI_PREEMPT(r8) | ||
171 | |||
172 | isync | ||
173 | |||
174 | /* | ||
175 | * If the read faults, we catch it and write an invalid value | ||
176 | * to LAST_INST instead. | ||
177 | */ | ||
178 | 1: lwepx r9, 0, r5 | ||
179 | 2: | ||
180 | .section .fixup, "ax" | ||
181 | 3: li r9, KVM_INST_FETCH_FAILED | ||
182 | b 2b | ||
183 | .previous | ||
184 | .section __ex_table,"a" | ||
185 | PPC_LONG_ALIGN | ||
186 | PPC_LONG 1b,3b | ||
187 | .previous | ||
188 | |||
189 | mtspr SPRN_EPLC, r3 | ||
190 | li r7, 0 | ||
191 | stw r7, TI_PREEMPT(r8) | ||
192 | stw r9, VCPU_LAST_INST(r4) | ||
193 | .endif | ||
194 | |||
195 | .if \flags & NEED_ESR | ||
196 | mfspr r8, SPRN_ESR | ||
197 | PPC_STL r8, VCPU_FAULT_ESR(r4) | ||
198 | .endif | ||
199 | |||
200 | .if \flags & NEED_DEAR | ||
201 | mfspr r9, SPRN_DEAR | ||
202 | PPC_STL r9, VCPU_FAULT_DEAR(r4) | ||
203 | .endif | ||
204 | |||
205 | b kvmppc_resume_host | ||
206 | .endm | ||
207 | |||
208 | /* | ||
209 | * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h | ||
210 | */ | ||
211 | .macro kvm_handler intno srr0, srr1, flags | ||
212 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) | ||
213 | GET_VCPU(r11, r10) | ||
214 | PPC_STL r3, VCPU_GPR(r3)(r11) | ||
215 | mfspr r3, SPRN_SPRG_RSCRATCH0 | ||
216 | PPC_STL r4, VCPU_GPR(r4)(r11) | ||
217 | PPC_LL r4, THREAD_NORMSAVE(0)(r10) | ||
218 | PPC_STL r5, VCPU_GPR(r5)(r11) | ||
219 | stw r13, VCPU_CR(r11) | ||
220 | mfspr r5, \srr0 | ||
221 | PPC_STL r3, VCPU_GPR(r10)(r11) | ||
222 | PPC_LL r3, THREAD_NORMSAVE(2)(r10) | ||
223 | PPC_STL r6, VCPU_GPR(r6)(r11) | ||
224 | PPC_STL r4, VCPU_GPR(r11)(r11) | ||
225 | mfspr r6, \srr1 | ||
226 | PPC_STL r7, VCPU_GPR(r7)(r11) | ||
227 | PPC_STL r8, VCPU_GPR(r8)(r11) | ||
228 | PPC_STL r9, VCPU_GPR(r9)(r11) | ||
229 | PPC_STL r3, VCPU_GPR(r13)(r11) | ||
230 | mfctr r7 | ||
231 | PPC_STL r12, VCPU_GPR(r12)(r11) | ||
232 | PPC_STL r7, VCPU_CTR(r11) | ||
233 | mr r4, r11 | ||
234 | kvm_handler_common \intno, \srr0, \flags | ||
235 | .endm | ||
236 | |||
237 | .macro kvm_lvl_handler intno scratch srr0, srr1, flags | ||
238 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) | ||
239 | mfspr r10, SPRN_SPRG_THREAD | ||
240 | GET_VCPU(r11, r10) | ||
241 | PPC_STL r3, VCPU_GPR(r3)(r11) | ||
242 | mfspr r3, \scratch | ||
243 | PPC_STL r4, VCPU_GPR(r4)(r11) | ||
244 | PPC_LL r4, GPR9(r8) | ||
245 | PPC_STL r5, VCPU_GPR(r5)(r11) | ||
246 | stw r9, VCPU_CR(r11) | ||
247 | mfspr r5, \srr0 | ||
248 | PPC_STL r3, VCPU_GPR(r8)(r11) | ||
249 | PPC_LL r3, GPR10(r8) | ||
250 | PPC_STL r6, VCPU_GPR(r6)(r11) | ||
251 | PPC_STL r4, VCPU_GPR(r9)(r11) | ||
252 | mfspr r6, \srr1 | ||
253 | PPC_LL r4, GPR11(r8) | ||
254 | PPC_STL r7, VCPU_GPR(r7)(r11) | ||
255 | PPC_STL r3, VCPU_GPR(r10)(r11) | ||
256 | mfctr r7 | ||
257 | PPC_STL r12, VCPU_GPR(r12)(r11) | ||
258 | PPC_STL r13, VCPU_GPR(r13)(r11) | ||
259 | PPC_STL r4, VCPU_GPR(r11)(r11) | ||
260 | PPC_STL r7, VCPU_CTR(r11) | ||
261 | mr r4, r11 | ||
262 | kvm_handler_common \intno, \srr0, \flags | ||
263 | .endm | ||
264 | |||
265 | kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \ | ||
266 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
267 | kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \ | ||
268 | SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0 | ||
269 | kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \ | ||
270 | SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR) | ||
271 | kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR | ||
272 | kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0 | ||
273 | kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \ | ||
274 | SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR) | ||
275 | kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR | ||
276 | kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 | ||
277 | kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 | ||
278 | kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 | ||
279 | kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0 | ||
280 | kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0 | ||
281 | kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \ | ||
282 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
283 | kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \ | ||
284 | SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) | ||
285 | kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0 | ||
286 | kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 | ||
287 | kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0 | ||
288 | kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0 | ||
289 | kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0 | ||
290 | kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0 | ||
291 | kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \ | ||
292 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
293 | kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU | ||
294 | kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 | ||
295 | kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0 | ||
296 | kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \ | ||
297 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
298 | kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ | ||
299 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
300 | kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ | ||
301 | SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0 | ||
302 | |||
303 | |||
304 | /* Registers: | ||
305 | * SPRG_SCRATCH0: guest r10 | ||
306 | * r4: vcpu pointer | ||
307 | * r11: vcpu->arch.shared | ||
308 | * r14: KVM exit number | ||
309 | */ | ||
310 | _GLOBAL(kvmppc_resume_host) | ||
311 | /* Save remaining volatile guest register state to vcpu. */ | ||
312 | mfspr r3, SPRN_VRSAVE | ||
313 | PPC_STL r0, VCPU_GPR(r0)(r4) | ||
314 | mflr r5 | ||
315 | mfspr r6, SPRN_SPRG4 | ||
316 | PPC_STL r5, VCPU_LR(r4) | ||
317 | mfspr r7, SPRN_SPRG5 | ||
318 | stw r3, VCPU_VRSAVE(r4) | ||
319 | PPC_STD(r6, VCPU_SHARED_SPRG4, r11) | ||
320 | mfspr r8, SPRN_SPRG6 | ||
321 | PPC_STD(r7, VCPU_SHARED_SPRG5, r11) | ||
322 | mfspr r9, SPRN_SPRG7 | ||
323 | PPC_STD(r8, VCPU_SHARED_SPRG6, r11) | ||
324 | mfxer r3 | ||
325 | PPC_STD(r9, VCPU_SHARED_SPRG7, r11) | ||
326 | |||
327 | /* save guest MAS registers and restore host mas4 & mas6 */ | ||
328 | mfspr r5, SPRN_MAS0 | ||
329 | PPC_STL r3, VCPU_XER(r4) | ||
330 | mfspr r6, SPRN_MAS1 | ||
331 | stw r5, VCPU_SHARED_MAS0(r11) | ||
332 | mfspr r7, SPRN_MAS2 | ||
333 | stw r6, VCPU_SHARED_MAS1(r11) | ||
334 | PPC_STD(r7, VCPU_SHARED_MAS2, r11) | ||
335 | mfspr r5, SPRN_MAS3 | ||
336 | mfspr r6, SPRN_MAS4 | ||
337 | stw r5, VCPU_SHARED_MAS7_3+4(r11) | ||
338 | mfspr r7, SPRN_MAS6 | ||
339 | stw r6, VCPU_SHARED_MAS4(r11) | ||
340 | mfspr r5, SPRN_MAS7 | ||
341 | lwz r6, VCPU_HOST_MAS4(r4) | ||
342 | stw r7, VCPU_SHARED_MAS6(r11) | ||
343 | lwz r8, VCPU_HOST_MAS6(r4) | ||
344 | mtspr SPRN_MAS4, r6 | ||
345 | stw r5, VCPU_SHARED_MAS7_3+0(r11) | ||
346 | mtspr SPRN_MAS6, r8 | ||
347 | /* Enable MAS register updates via exception */ | ||
348 | mfspr r3, SPRN_EPCR | ||
349 | rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH | ||
350 | mtspr SPRN_EPCR, r3 | ||
351 | isync | ||
352 | |||
353 | /* Switch to kernel stack and jump to handler. */ | ||
354 | PPC_LL r3, HOST_RUN(r1) | ||
355 | mr r5, r14 /* intno */ | ||
356 | mr r14, r4 /* Save vcpu pointer. */ | ||
357 | bl kvmppc_handle_exit | ||
358 | |||
359 | /* Restore vcpu pointer and the nonvolatiles we used. */ | ||
360 | mr r4, r14 | ||
361 | PPC_LL r14, VCPU_GPR(r14)(r4) | ||
362 | |||
363 | andi. r5, r3, RESUME_FLAG_NV | ||
364 | beq skip_nv_load | ||
365 | PPC_LL r15, VCPU_GPR(r15)(r4) | ||
366 | PPC_LL r16, VCPU_GPR(r16)(r4) | ||
367 | PPC_LL r17, VCPU_GPR(r17)(r4) | ||
368 | PPC_LL r18, VCPU_GPR(r18)(r4) | ||
369 | PPC_LL r19, VCPU_GPR(r19)(r4) | ||
370 | PPC_LL r20, VCPU_GPR(r20)(r4) | ||
371 | PPC_LL r21, VCPU_GPR(r21)(r4) | ||
372 | PPC_LL r22, VCPU_GPR(r22)(r4) | ||
373 | PPC_LL r23, VCPU_GPR(r23)(r4) | ||
374 | PPC_LL r24, VCPU_GPR(r24)(r4) | ||
375 | PPC_LL r25, VCPU_GPR(r25)(r4) | ||
376 | PPC_LL r26, VCPU_GPR(r26)(r4) | ||
377 | PPC_LL r27, VCPU_GPR(r27)(r4) | ||
378 | PPC_LL r28, VCPU_GPR(r28)(r4) | ||
379 | PPC_LL r29, VCPU_GPR(r29)(r4) | ||
380 | PPC_LL r30, VCPU_GPR(r30)(r4) | ||
381 | PPC_LL r31, VCPU_GPR(r31)(r4) | ||
382 | skip_nv_load: | ||
383 | /* Should we return to the guest? */ | ||
384 | andi. r5, r3, RESUME_FLAG_HOST | ||
385 | beq lightweight_exit | ||
386 | |||
387 | srawi r3, r3, 2 /* Shift -ERR back down. */ | ||
388 | |||
389 | heavyweight_exit: | ||
390 | /* Not returning to guest. */ | ||
391 | PPC_LL r5, HOST_STACK_LR(r1) | ||
392 | lwz r6, HOST_CR(r1) | ||
393 | |||
394 | /* | ||
395 | * We already saved guest volatile register state; now save the | ||
396 | * non-volatiles. | ||
397 | */ | ||
398 | |||
399 | PPC_STL r15, VCPU_GPR(r15)(r4) | ||
400 | PPC_STL r16, VCPU_GPR(r16)(r4) | ||
401 | PPC_STL r17, VCPU_GPR(r17)(r4) | ||
402 | PPC_STL r18, VCPU_GPR(r18)(r4) | ||
403 | PPC_STL r19, VCPU_GPR(r19)(r4) | ||
404 | PPC_STL r20, VCPU_GPR(r20)(r4) | ||
405 | PPC_STL r21, VCPU_GPR(r21)(r4) | ||
406 | PPC_STL r22, VCPU_GPR(r22)(r4) | ||
407 | PPC_STL r23, VCPU_GPR(r23)(r4) | ||
408 | PPC_STL r24, VCPU_GPR(r24)(r4) | ||
409 | PPC_STL r25, VCPU_GPR(r25)(r4) | ||
410 | PPC_STL r26, VCPU_GPR(r26)(r4) | ||
411 | PPC_STL r27, VCPU_GPR(r27)(r4) | ||
412 | PPC_STL r28, VCPU_GPR(r28)(r4) | ||
413 | PPC_STL r29, VCPU_GPR(r29)(r4) | ||
414 | PPC_STL r30, VCPU_GPR(r30)(r4) | ||
415 | PPC_STL r31, VCPU_GPR(r31)(r4) | ||
416 | |||
417 | /* Load host non-volatile register state from host stack. */ | ||
418 | PPC_LL r14, HOST_NV_GPR(r14)(r1) | ||
419 | PPC_LL r15, HOST_NV_GPR(r15)(r1) | ||
420 | PPC_LL r16, HOST_NV_GPR(r16)(r1) | ||
421 | PPC_LL r17, HOST_NV_GPR(r17)(r1) | ||
422 | PPC_LL r18, HOST_NV_GPR(r18)(r1) | ||
423 | PPC_LL r19, HOST_NV_GPR(r19)(r1) | ||
424 | PPC_LL r20, HOST_NV_GPR(r20)(r1) | ||
425 | PPC_LL r21, HOST_NV_GPR(r21)(r1) | ||
426 | PPC_LL r22, HOST_NV_GPR(r22)(r1) | ||
427 | PPC_LL r23, HOST_NV_GPR(r23)(r1) | ||
428 | PPC_LL r24, HOST_NV_GPR(r24)(r1) | ||
429 | PPC_LL r25, HOST_NV_GPR(r25)(r1) | ||
430 | PPC_LL r26, HOST_NV_GPR(r26)(r1) | ||
431 | PPC_LL r27, HOST_NV_GPR(r27)(r1) | ||
432 | PPC_LL r28, HOST_NV_GPR(r28)(r1) | ||
433 | PPC_LL r29, HOST_NV_GPR(r29)(r1) | ||
434 | PPC_LL r30, HOST_NV_GPR(r30)(r1) | ||
435 | PPC_LL r31, HOST_NV_GPR(r31)(r1) | ||
436 | |||
437 | /* Return to kvm_vcpu_run(). */ | ||
438 | mtlr r5 | ||
439 | mtcr r6 | ||
440 | addi r1, r1, HOST_STACK_SIZE | ||
441 | /* r3 still contains the return code from kvmppc_handle_exit(). */ | ||
442 | blr | ||
443 | |||
444 | /* Registers: | ||
445 | * r3: kvm_run pointer | ||
446 | * r4: vcpu pointer | ||
447 | */ | ||
448 | _GLOBAL(__kvmppc_vcpu_run) | ||
449 | stwu r1, -HOST_STACK_SIZE(r1) | ||
450 | PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */ | ||
451 | |||
452 | /* Save host state to stack. */ | ||
453 | PPC_STL r3, HOST_RUN(r1) | ||
454 | mflr r3 | ||
455 | mfcr r5 | ||
456 | PPC_STL r3, HOST_STACK_LR(r1) | ||
457 | |||
458 | stw r5, HOST_CR(r1) | ||
459 | |||
460 | /* Save host non-volatile register state to stack. */ | ||
461 | PPC_STL r14, HOST_NV_GPR(r14)(r1) | ||
462 | PPC_STL r15, HOST_NV_GPR(r15)(r1) | ||
463 | PPC_STL r16, HOST_NV_GPR(r16)(r1) | ||
464 | PPC_STL r17, HOST_NV_GPR(r17)(r1) | ||
465 | PPC_STL r18, HOST_NV_GPR(r18)(r1) | ||
466 | PPC_STL r19, HOST_NV_GPR(r19)(r1) | ||
467 | PPC_STL r20, HOST_NV_GPR(r20)(r1) | ||
468 | PPC_STL r21, HOST_NV_GPR(r21)(r1) | ||
469 | PPC_STL r22, HOST_NV_GPR(r22)(r1) | ||
470 | PPC_STL r23, HOST_NV_GPR(r23)(r1) | ||
471 | PPC_STL r24, HOST_NV_GPR(r24)(r1) | ||
472 | PPC_STL r25, HOST_NV_GPR(r25)(r1) | ||
473 | PPC_STL r26, HOST_NV_GPR(r26)(r1) | ||
474 | PPC_STL r27, HOST_NV_GPR(r27)(r1) | ||
475 | PPC_STL r28, HOST_NV_GPR(r28)(r1) | ||
476 | PPC_STL r29, HOST_NV_GPR(r29)(r1) | ||
477 | PPC_STL r30, HOST_NV_GPR(r30)(r1) | ||
478 | PPC_STL r31, HOST_NV_GPR(r31)(r1) | ||
479 | |||
480 | /* Load guest non-volatiles. */ | ||
481 | PPC_LL r14, VCPU_GPR(r14)(r4) | ||
482 | PPC_LL r15, VCPU_GPR(r15)(r4) | ||
483 | PPC_LL r16, VCPU_GPR(r16)(r4) | ||
484 | PPC_LL r17, VCPU_GPR(r17)(r4) | ||
485 | PPC_LL r18, VCPU_GPR(r18)(r4) | ||
486 | PPC_LL r19, VCPU_GPR(r19)(r4) | ||
487 | PPC_LL r20, VCPU_GPR(r20)(r4) | ||
488 | PPC_LL r21, VCPU_GPR(r21)(r4) | ||
489 | PPC_LL r22, VCPU_GPR(r22)(r4) | ||
490 | PPC_LL r23, VCPU_GPR(r23)(r4) | ||
491 | PPC_LL r24, VCPU_GPR(r24)(r4) | ||
492 | PPC_LL r25, VCPU_GPR(r25)(r4) | ||
493 | PPC_LL r26, VCPU_GPR(r26)(r4) | ||
494 | PPC_LL r27, VCPU_GPR(r27)(r4) | ||
495 | PPC_LL r28, VCPU_GPR(r28)(r4) | ||
496 | PPC_LL r29, VCPU_GPR(r29)(r4) | ||
497 | PPC_LL r30, VCPU_GPR(r30)(r4) | ||
498 | PPC_LL r31, VCPU_GPR(r31)(r4) | ||
499 | |||
500 | |||
501 | lightweight_exit: | ||
502 | PPC_STL r2, HOST_R2(r1) | ||
503 | |||
504 | mfspr r3, SPRN_PID | ||
505 | stw r3, VCPU_HOST_PID(r4) | ||
506 | lwz r3, VCPU_GUEST_PID(r4) | ||
507 | mtspr SPRN_PID, r3 | ||
508 | |||
509 | PPC_LL r11, VCPU_SHARED(r4) | ||
510 | /* Disable MAS register updates via exception */ | ||
511 | mfspr r3, SPRN_EPCR | ||
512 | oris r3, r3, SPRN_EPCR_DMIUH@h | ||
513 | mtspr SPRN_EPCR, r3 | ||
514 | isync | ||
515 | /* Save host mas4 and mas6 and load guest MAS registers */ | ||
516 | mfspr r3, SPRN_MAS4 | ||
517 | stw r3, VCPU_HOST_MAS4(r4) | ||
518 | mfspr r3, SPRN_MAS6 | ||
519 | stw r3, VCPU_HOST_MAS6(r4) | ||
520 | lwz r3, VCPU_SHARED_MAS0(r11) | ||
521 | lwz r5, VCPU_SHARED_MAS1(r11) | ||
522 | PPC_LD(r6, VCPU_SHARED_MAS2, r11) | ||
523 | lwz r7, VCPU_SHARED_MAS7_3+4(r11) | ||
524 | lwz r8, VCPU_SHARED_MAS4(r11) | ||
525 | mtspr SPRN_MAS0, r3 | ||
526 | mtspr SPRN_MAS1, r5 | ||
527 | mtspr SPRN_MAS2, r6 | ||
528 | mtspr SPRN_MAS3, r7 | ||
529 | mtspr SPRN_MAS4, r8 | ||
530 | lwz r3, VCPU_SHARED_MAS6(r11) | ||
531 | lwz r5, VCPU_SHARED_MAS7_3+0(r11) | ||
532 | mtspr SPRN_MAS6, r3 | ||
533 | mtspr SPRN_MAS7, r5 | ||
534 | |||
535 | /* | ||
536 | * Host interrupt handlers may have clobbered these guest-readable | ||
537 | * SPRGs, so we need to reload them here with the guest's values. | ||
538 | */ | ||
539 | lwz r3, VCPU_VRSAVE(r4) | ||
540 | PPC_LD(r5, VCPU_SHARED_SPRG4, r11) | ||
541 | mtspr SPRN_VRSAVE, r3 | ||
542 | PPC_LD(r6, VCPU_SHARED_SPRG5, r11) | ||
543 | mtspr SPRN_SPRG4W, r5 | ||
544 | PPC_LD(r7, VCPU_SHARED_SPRG6, r11) | ||
545 | mtspr SPRN_SPRG5W, r6 | ||
546 | PPC_LD(r8, VCPU_SHARED_SPRG7, r11) | ||
547 | mtspr SPRN_SPRG6W, r7 | ||
548 | mtspr SPRN_SPRG7W, r8 | ||
549 | |||
550 | /* Load some guest volatiles. */ | ||
551 | PPC_LL r3, VCPU_LR(r4) | ||
552 | PPC_LL r5, VCPU_XER(r4) | ||
553 | PPC_LL r6, VCPU_CTR(r4) | ||
554 | lwz r7, VCPU_CR(r4) | ||
555 | PPC_LL r8, VCPU_PC(r4) | ||
556 | PPC_LD(r9, VCPU_SHARED_MSR, r11) | ||
557 | PPC_LL r0, VCPU_GPR(r0)(r4) | ||
558 | PPC_LL r1, VCPU_GPR(r1)(r4) | ||
559 | PPC_LL r2, VCPU_GPR(r2)(r4) | ||
560 | PPC_LL r10, VCPU_GPR(r10)(r4) | ||
561 | PPC_LL r11, VCPU_GPR(r11)(r4) | ||
562 | PPC_LL r12, VCPU_GPR(r12)(r4) | ||
563 | PPC_LL r13, VCPU_GPR(r13)(r4) | ||
564 | mtlr r3 | ||
565 | mtxer r5 | ||
566 | mtctr r6 | ||
567 | mtsrr0 r8 | ||
568 | mtsrr1 r9 | ||
569 | |||
570 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
571 | /* save enter time */ | ||
572 | 1: | ||
573 | mfspr r6, SPRN_TBRU | ||
574 | mfspr r9, SPRN_TBRL | ||
575 | mfspr r8, SPRN_TBRU | ||
576 | cmpw r8, r6 | ||
577 | stw r9, VCPU_TIMING_LAST_ENTER_TBL(r4) | ||
578 | bne 1b | ||
579 | stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4) | ||
580 | #endif | ||
581 | |||
582 | /* | ||
583 | * Don't execute any instruction that can change CR after | ||
584 | * the mtcr below. | ||
585 | */ | ||
586 | mtcr r7 | ||
587 | |||
588 | /* Finish loading guest volatiles and jump to guest. */ | ||
589 | PPC_LL r5, VCPU_GPR(r5)(r4) | ||
590 | PPC_LL r6, VCPU_GPR(r6)(r4) | ||
591 | PPC_LL r7, VCPU_GPR(r7)(r4) | ||
592 | PPC_LL r8, VCPU_GPR(r8)(r4) | ||
593 | PPC_LL r9, VCPU_GPR(r9)(r4) | ||
594 | |||
595 | PPC_LL r3, VCPU_GPR(r3)(r4) | ||
596 | PPC_LL r4, VCPU_GPR(r4)(r4) | ||
597 | rfi | ||
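Reviewer note, not part of the patch: the HOST_* offsets near the top of this file describe the frame that __kvmppc_vcpu_run allocates with stwu. Roughly, as a C view (illustrative only; the struct name is made up and the real code only ever uses the byte offsets):

	/* Illustrative C view of the host stack frame built by __kvmppc_vcpu_run;
	 * each slot is LONGBYTES wide, matching HOST_R1..HOST_NV_GPR(31). */
	struct kvmppc_booke_hv_host_frame {
		unsigned long back_chain;	/* HOST_R1, written by stwu */
		unsigned long callee_lr;	/* HOST_CALLEE_LR */
		unsigned long run;		/* HOST_RUN: struct kvm_run pointer */
		unsigned long r2;		/* HOST_R2: 'current' (-ffixed-r2) */
		unsigned long cr;		/* HOST_CR */
		unsigned long nv_gpr[18];	/* HOST_NV_GPR(14)..HOST_NV_GPR(31) */
		/* rounded up to a 16-byte multiple -> HOST_STACK_SIZE; the caller's
		 * LR save slot (HOST_STACK_LR) lives in the frame above this one. */
	};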
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index ddcd896fa2ff..b479ed77c515 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -20,11 +20,282 @@ | |||
20 | #include <asm/reg.h> | 20 | #include <asm/reg.h> |
21 | #include <asm/cputable.h> | 21 | #include <asm/cputable.h> |
22 | #include <asm/tlbflush.h> | 22 | #include <asm/tlbflush.h> |
23 | #include <asm/kvm_e500.h> | ||
24 | #include <asm/kvm_ppc.h> | 23 | #include <asm/kvm_ppc.h> |
25 | 24 | ||
25 | #include "../mm/mmu_decl.h" | ||
26 | #include "booke.h" | 26 | #include "booke.h" |
27 | #include "e500_tlb.h" | 27 | #include "e500.h" |
28 | |||
29 | struct id { | ||
30 | unsigned long val; | ||
31 | struct id **pentry; | ||
32 | }; | ||
33 | |||
34 | #define NUM_TIDS 256 | ||
35 | |||
36 | /* | ||
37 | * This table provides mappings from: | ||
38 | * (guestAS,guestTID,guestPR) --> ID of physical cpu | ||
39 | * guestAS [0..1] | ||
40 | * guestTID [0..255] | ||
41 | * guestPR [0..1] | ||
42 | * ID [1..255] | ||
43 | * Each vcpu keeps one vcpu_id_table. | ||
44 | */ | ||
45 | struct vcpu_id_table { | ||
46 | struct id id[2][NUM_TIDS][2]; | ||
47 | }; | ||
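Concretely, each (guestAS, guestTID, guestPR) triple indexes one struct id slot, so a vcpu_id_table holds 2 * NUM_TIDS * 2 = 1024 slots per vcpu. A small sketch of the indexing (illustrative helper, not in the patch):

	/* One slot per (guestAS, guestTID, guestPR); slot->val is the
	 * shadow ID currently assigned, or 0 if there is none. */
	static struct id *id_slot(struct kvmppc_vcpu_e500 *vcpu_e500,
				  int as, int tid, int pr)
	{
		return &vcpu_e500->idt->id[as][tid][pr];
	}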
48 | |||
49 | /* | ||
50 | * This table provides reversed mappings of vcpu_id_table: | ||
51 | * ID --> address of vcpu_id_table item. | ||
52 | * Each physical core has one pcpu_id_table. | ||
53 | */ | ||
54 | struct pcpu_id_table { | ||
55 | struct id *entry[NUM_TIDS]; | ||
56 | }; | ||
57 | |||
58 | static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids); | ||
59 | |||
60 | /* This variable keeps the last used shadow ID on the local core. | ||
61 | * The valid range of a shadow ID is [1..255]. */ | ||
62 | static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid); | ||
63 | |||
64 | /* | ||
65 | * Allocate a free shadow id and set up a valid sid mapping in the given entry. | ||
66 | * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries match. | ||
67 | * | ||
68 | * The caller must have preemption disabled, and keep it that way until | ||
69 | * it has finished with the returned shadow id (either written into the | ||
70 | * TLB or arch.shadow_pid, or discarded). | ||
71 | */ | ||
72 | static inline int local_sid_setup_one(struct id *entry) | ||
73 | { | ||
74 | unsigned long sid; | ||
75 | int ret = -1; | ||
76 | |||
77 | sid = ++(__get_cpu_var(pcpu_last_used_sid)); | ||
78 | if (sid < NUM_TIDS) { | ||
79 | __get_cpu_var(pcpu_sids).entry[sid] = entry; | ||
80 | entry->val = sid; | ||
81 | entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; | ||
82 | ret = sid; | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * If sid == NUM_TIDS, we've run out of sids. We return -1, and | ||
87 | * the caller will invalidate everything and start over. | ||
88 | * | ||
89 | * sid > NUM_TIDS indicates a race, which we disable preemption to | ||
90 | * avoid. | ||
91 | */ | ||
92 | WARN_ON(sid > NUM_TIDS); | ||
93 | |||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * Check if the given entry contains a valid shadow id mapping. | ||
99 | * An ID mapping is considered valid only if | ||
100 | * both vcpu and pcpu know this mapping. | ||
101 | * | ||
102 | * The caller must have preemption disabled, and keep it that way until | ||
103 | * it has finished with the returned shadow id (either written into the | ||
104 | * TLB or arch.shadow_pid, or discarded). | ||
105 | */ | ||
106 | static inline int local_sid_lookup(struct id *entry) | ||
107 | { | ||
108 | if (entry && entry->val != 0 && | ||
109 | __get_cpu_var(pcpu_sids).entry[entry->val] == entry && | ||
110 | entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) | ||
111 | return entry->val; | ||
112 | return -1; | ||
113 | } | ||
114 | |||
115 | /* Invalidate all id mappings on local core -- call with preempt disabled */ | ||
116 | static inline void local_sid_destroy_all(void) | ||
117 | { | ||
118 | __get_cpu_var(pcpu_last_used_sid) = 0; | ||
119 | memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); | ||
120 | } | ||
121 | |||
122 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
123 | { | ||
124 | vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL); | ||
125 | return vcpu_e500->idt; | ||
126 | } | ||
127 | |||
128 | static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
129 | { | ||
130 | kfree(vcpu_e500->idt); | ||
131 | vcpu_e500->idt = NULL; | ||
132 | } | ||
133 | |||
134 | /* Map guest pid to shadow. | ||
135 | * We use PID to hold the shadow of the current guest's non-zero PID, | ||
136 | * and PID1 to hold the shadow of guest PID 0, | ||
137 | * so that guest tlbes with TID=0 can be accessed at any time. */ | ||
138 | static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
139 | { | ||
140 | preempt_disable(); | ||
141 | vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, | ||
142 | get_cur_as(&vcpu_e500->vcpu), | ||
143 | get_cur_pid(&vcpu_e500->vcpu), | ||
144 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
145 | vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, | ||
146 | get_cur_as(&vcpu_e500->vcpu), 0, | ||
147 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
148 | preempt_enable(); | ||
149 | } | ||
150 | |||
151 | /* Invalidate all mappings on vcpu */ | ||
152 | static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
153 | { | ||
154 | memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table)); | ||
155 | |||
156 | /* Update shadow pid when mappings are changed */ | ||
157 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
158 | } | ||
159 | |||
160 | /* Invalidate one ID mapping on vcpu */ | ||
161 | static inline void kvmppc_e500_id_table_reset_one( | ||
162 | struct kvmppc_vcpu_e500 *vcpu_e500, | ||
163 | int as, int pid, int pr) | ||
164 | { | ||
165 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
166 | |||
167 | BUG_ON(as >= 2); | ||
168 | BUG_ON(pid >= NUM_TIDS); | ||
169 | BUG_ON(pr >= 2); | ||
170 | |||
171 | idt->id[as][pid][pr].val = 0; | ||
172 | idt->id[as][pid][pr].pentry = NULL; | ||
173 | |||
174 | /* Update shadow pid when mappings are changed */ | ||
175 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Map guest (vcpu,AS,ID,PR) to physical core shadow id. | ||
180 | * This function first looks up whether a valid mapping exists; | ||
181 | * if not, it creates a new one. | ||
182 | * | ||
183 | * The caller must have preemption disabled, and keep it that way until | ||
184 | * it has finished with the returned shadow id (either written into the | ||
185 | * TLB or arch.shadow_pid, or discarded). | ||
186 | */ | ||
187 | unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
188 | unsigned int as, unsigned int gid, | ||
189 | unsigned int pr, int avoid_recursion) | ||
190 | { | ||
191 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
192 | int sid; | ||
193 | |||
194 | BUG_ON(as >= 2); | ||
195 | BUG_ON(gid >= NUM_TIDS); | ||
196 | BUG_ON(pr >= 2); | ||
197 | |||
198 | sid = local_sid_lookup(&idt->id[as][gid][pr]); | ||
199 | |||
200 | while (sid <= 0) { | ||
201 | /* No mapping yet */ | ||
202 | sid = local_sid_setup_one(&idt->id[as][gid][pr]); | ||
203 | if (sid <= 0) { | ||
204 | _tlbil_all(); | ||
205 | local_sid_destroy_all(); | ||
206 | } | ||
207 | |||
208 | /* Update shadow pid when mappings are changed */ | ||
209 | if (!avoid_recursion) | ||
210 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
211 | } | ||
212 | |||
213 | return sid; | ||
214 | } | ||
215 | |||
216 | unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, | ||
217 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
218 | { | ||
219 | return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe), | ||
220 | get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0); | ||
221 | } | ||
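A caller is expected to keep preemption disabled from the lookup until the returned shadow TID has been consumed, roughly like this (sketch only; MAS1_TID is the usual field macro and the TLB write helper here is hypothetical):

	unsigned int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(vcpu, gtlbe);
	/* The sid is only guaranteed valid while preemption stays off. */
	stlbe->mas1 = (stlbe->mas1 & ~MAS1_TID(~0)) | MAS1_TID(stid);
	write_shadow_tlbe(vcpu_e500, stlbe);	/* hypothetical helper */
	preempt_enable();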
222 | |||
223 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) | ||
224 | { | ||
225 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
226 | |||
227 | if (vcpu->arch.pid != pid) { | ||
228 | vcpu_e500->pid[0] = vcpu->arch.pid = pid; | ||
229 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
230 | } | ||
231 | } | ||
232 | |||
233 | /* gtlbe must not be mapped by more than one host tlbe */ | ||
234 | void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
235 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
236 | { | ||
237 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
238 | unsigned int pr, tid, ts, pid; | ||
239 | u32 val, eaddr; | ||
240 | unsigned long flags; | ||
241 | |||
242 | ts = get_tlb_ts(gtlbe); | ||
243 | tid = get_tlb_tid(gtlbe); | ||
244 | |||
245 | preempt_disable(); | ||
246 | |||
247 | /* One guest ID may be mapped to two shadow IDs */ | ||
248 | for (pr = 0; pr < 2; pr++) { | ||
249 | /* | ||
250 | * The shadow PID can have a valid mapping on at most one | ||
251 | * host CPU. In the common case, it will be valid on this | ||
252 | * CPU, in which case we do a local invalidation of the | ||
253 | * specific address. | ||
254 | * | ||
255 | * If the shadow PID is not valid on the current host CPU, | ||
256 | * we invalidate the entire shadow PID. | ||
257 | */ | ||
258 | pid = local_sid_lookup(&idt->id[ts][tid][pr]); | ||
259 | if (pid <= 0) { | ||
260 | kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr); | ||
261 | continue; | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * The guest is invalidating a 4K entry which is in a PID | ||
266 | * that has a valid shadow mapping on this host CPU. We | ||
267 | * search the host TLB to invalidate its shadow TLB entry, | ||
268 | * similar to __tlbil_va except that we need to look in AS1. | ||
269 | */ | ||
270 | val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS; | ||
271 | eaddr = get_tlb_eaddr(gtlbe); | ||
272 | |||
273 | local_irq_save(flags); | ||
274 | |||
275 | mtspr(SPRN_MAS6, val); | ||
276 | asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr)); | ||
277 | val = mfspr(SPRN_MAS1); | ||
278 | if (val & MAS1_VALID) { | ||
279 | mtspr(SPRN_MAS1, val & ~MAS1_VALID); | ||
280 | asm volatile("tlbwe"); | ||
281 | } | ||
282 | |||
283 | local_irq_restore(flags); | ||
284 | } | ||
285 | |||
286 | preempt_enable(); | ||
287 | } | ||
288 | |||
289 | void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
290 | { | ||
291 | kvmppc_e500_id_table_reset_all(vcpu_e500); | ||
292 | } | ||
293 | |||
294 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | ||
295 | { | ||
296 | /* Recalc shadow pid since MSR changes */ | ||
297 | kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); | ||
298 | } | ||
28 | 299 | ||
29 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) | 300 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) |
30 | { | 301 | { |
@@ -36,17 +307,20 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) | |||
36 | 307 | ||
37 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 308 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
38 | { | 309 | { |
39 | kvmppc_e500_tlb_load(vcpu, cpu); | 310 | kvmppc_booke_vcpu_load(vcpu, cpu); |
311 | |||
312 | /* Shadow PID may have expired on the local core */ | ||
313 | kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); | ||
40 | } | 314 | } |
41 | 315 | ||
42 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 316 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) |
43 | { | 317 | { |
44 | kvmppc_e500_tlb_put(vcpu); | ||
45 | |||
46 | #ifdef CONFIG_SPE | 318 | #ifdef CONFIG_SPE |
47 | if (vcpu->arch.shadow_msr & MSR_SPE) | 319 | if (vcpu->arch.shadow_msr & MSR_SPE) |
48 | kvmppc_vcpu_disable_spe(vcpu); | 320 | kvmppc_vcpu_disable_spe(vcpu); |
49 | #endif | 321 | #endif |
322 | |||
323 | kvmppc_booke_vcpu_put(vcpu); | ||
50 | } | 324 | } |
51 | 325 | ||
52 | int kvmppc_core_check_processor_compat(void) | 326 | int kvmppc_core_check_processor_compat(void) |
@@ -61,6 +335,23 @@ int kvmppc_core_check_processor_compat(void) | |||
61 | return r; | 335 | return r; |
62 | } | 336 | } |
63 | 337 | ||
338 | static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
339 | { | ||
340 | struct kvm_book3e_206_tlb_entry *tlbe; | ||
341 | |||
342 | /* Insert large initial mapping for guest. */ | ||
343 | tlbe = get_entry(vcpu_e500, 1, 0); | ||
344 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); | ||
345 | tlbe->mas2 = 0; | ||
346 | tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK; | ||
347 | |||
348 | /* 4K map for serial output. Used by kernel wrapper. */ | ||
349 | tlbe = get_entry(vcpu_e500, 1, 1); | ||
350 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); | ||
351 | tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; | ||
352 | tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; | ||
353 | } | ||
354 | |||
64 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | 355 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) |
65 | { | 356 | { |
66 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 357 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -76,32 +367,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
76 | return 0; | 367 | return 0; |
77 | } | 368 | } |
78 | 369 | ||
79 | /* 'linear_address' is actually an encoding of AS|PID|EADDR . */ | ||
80 | int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | ||
81 | struct kvm_translation *tr) | ||
82 | { | ||
83 | int index; | ||
84 | gva_t eaddr; | ||
85 | u8 pid; | ||
86 | u8 as; | ||
87 | |||
88 | eaddr = tr->linear_address; | ||
89 | pid = (tr->linear_address >> 32) & 0xff; | ||
90 | as = (tr->linear_address >> 40) & 0x1; | ||
91 | |||
92 | index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); | ||
93 | if (index < 0) { | ||
94 | tr->valid = 0; | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); | ||
99 | /* XXX what does "writeable" and "usermode" even mean? */ | ||
100 | tr->valid = 1; | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 370 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
106 | { | 371 | { |
107 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 372 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -115,19 +380,6 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
115 | sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; | 380 | sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; |
116 | sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; | 381 | sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; |
117 | 382 | ||
118 | sregs->u.e.mas0 = vcpu->arch.shared->mas0; | ||
119 | sregs->u.e.mas1 = vcpu->arch.shared->mas1; | ||
120 | sregs->u.e.mas2 = vcpu->arch.shared->mas2; | ||
121 | sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; | ||
122 | sregs->u.e.mas4 = vcpu->arch.shared->mas4; | ||
123 | sregs->u.e.mas6 = vcpu->arch.shared->mas6; | ||
124 | |||
125 | sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG); | ||
126 | sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg; | ||
127 | sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg; | ||
128 | sregs->u.e.tlbcfg[2] = 0; | ||
129 | sregs->u.e.tlbcfg[3] = 0; | ||
130 | |||
131 | sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; | 383 | sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; |
132 | sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; | 384 | sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; |
133 | sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; | 385 | sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; |
@@ -135,11 +387,13 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
135 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; | 387 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; |
136 | 388 | ||
137 | kvmppc_get_sregs_ivor(vcpu, sregs); | 389 | kvmppc_get_sregs_ivor(vcpu, sregs); |
390 | kvmppc_get_sregs_e500_tlb(vcpu, sregs); | ||
138 | } | 391 | } |
139 | 392 | ||
140 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 393 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
141 | { | 394 | { |
142 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 395 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
396 | int ret; | ||
143 | 397 | ||
144 | if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { | 398 | if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { |
145 | vcpu_e500->svr = sregs->u.e.impl.fsl.svr; | 399 | vcpu_e500->svr = sregs->u.e.impl.fsl.svr; |
@@ -147,14 +401,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
147 | vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; | 401 | vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; |
148 | } | 402 | } |
149 | 403 | ||
150 | if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { | 404 | ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs); |
151 | vcpu->arch.shared->mas0 = sregs->u.e.mas0; | 405 | if (ret < 0) |
152 | vcpu->arch.shared->mas1 = sregs->u.e.mas1; | 406 | return ret; |
153 | vcpu->arch.shared->mas2 = sregs->u.e.mas2; | ||
154 | vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; | ||
155 | vcpu->arch.shared->mas4 = sregs->u.e.mas4; | ||
156 | vcpu->arch.shared->mas6 = sregs->u.e.mas6; | ||
157 | } | ||
158 | 407 | ||
159 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | 408 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) |
160 | return 0; | 409 | return 0; |
@@ -193,9 +442,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
193 | if (err) | 442 | if (err) |
194 | goto free_vcpu; | 443 | goto free_vcpu; |
195 | 444 | ||
445 | if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) | ||
446 | goto uninit_vcpu; | ||
447 | |||
196 | err = kvmppc_e500_tlb_init(vcpu_e500); | 448 | err = kvmppc_e500_tlb_init(vcpu_e500); |
197 | if (err) | 449 | if (err) |
198 | goto uninit_vcpu; | 450 | goto uninit_id; |
199 | 451 | ||
200 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); | 452 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
201 | if (!vcpu->arch.shared) | 453 | if (!vcpu->arch.shared) |
@@ -205,6 +457,8 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
205 | 457 | ||
206 | uninit_tlb: | 458 | uninit_tlb: |
207 | kvmppc_e500_tlb_uninit(vcpu_e500); | 459 | kvmppc_e500_tlb_uninit(vcpu_e500); |
460 | uninit_id: | ||
461 | kvmppc_e500_id_table_free(vcpu_e500); | ||
208 | uninit_vcpu: | 462 | uninit_vcpu: |
209 | kvm_vcpu_uninit(vcpu); | 463 | kvm_vcpu_uninit(vcpu); |
210 | free_vcpu: | 464 | free_vcpu: |
@@ -218,11 +472,21 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
218 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 472 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
219 | 473 | ||
220 | free_page((unsigned long)vcpu->arch.shared); | 474 | free_page((unsigned long)vcpu->arch.shared); |
221 | kvm_vcpu_uninit(vcpu); | ||
222 | kvmppc_e500_tlb_uninit(vcpu_e500); | 475 | kvmppc_e500_tlb_uninit(vcpu_e500); |
476 | kvmppc_e500_id_table_free(vcpu_e500); | ||
477 | kvm_vcpu_uninit(vcpu); | ||
223 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | 478 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); |
224 | } | 479 | } |
225 | 480 | ||
481 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
482 | { | ||
483 | return 0; | ||
484 | } | ||
485 | |||
486 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
487 | { | ||
488 | } | ||
489 | |||
226 | static int __init kvmppc_e500_init(void) | 490 | static int __init kvmppc_e500_init(void) |
227 | { | 491 | { |
228 | int r, i; | 492 | int r, i; |
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h new file mode 100644 index 000000000000..aa8b81428bf4 --- /dev/null +++ b/arch/powerpc/kvm/e500.h | |||
@@ -0,0 +1,306 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: Yu Liu <yu.liu@freescale.com> | ||
5 | * Scott Wood <scottwood@freescale.com> | ||
6 | * Ashish Kalra <ashish.kalra@freescale.com> | ||
7 | * Varun Sethi <varun.sethi@freescale.com> | ||
8 | * | ||
9 | * Description: | ||
10 | * This file is based on arch/powerpc/kvm/44x_tlb.h and | ||
11 | * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>, | ||
12 | * Copyright IBM Corp. 2007-2008 | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License, version 2, as | ||
16 | * published by the Free Software Foundation. | ||
17 | */ | ||
18 | |||
19 | #ifndef KVM_E500_H | ||
20 | #define KVM_E500_H | ||
21 | |||
22 | #include <linux/kvm_host.h> | ||
23 | #include <asm/mmu-book3e.h> | ||
24 | #include <asm/tlb.h> | ||
25 | |||
26 | #define E500_PID_NUM 3 | ||
27 | #define E500_TLB_NUM 2 | ||
28 | |||
29 | #define E500_TLB_VALID 1 | ||
30 | #define E500_TLB_DIRTY 2 | ||
31 | #define E500_TLB_BITMAP 4 | ||
32 | |||
33 | struct tlbe_ref { | ||
34 | pfn_t pfn; | ||
35 | unsigned int flags; /* E500_TLB_* */ | ||
36 | }; | ||
37 | |||
38 | struct tlbe_priv { | ||
39 | struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ | ||
40 | }; | ||
41 | |||
42 | #ifdef CONFIG_KVM_E500V2 | ||
43 | struct vcpu_id_table; | ||
44 | #endif | ||
45 | |||
46 | struct kvmppc_e500_tlb_params { | ||
47 | int entries, ways, sets; | ||
48 | }; | ||
49 | |||
50 | struct kvmppc_vcpu_e500 { | ||
51 | struct kvm_vcpu vcpu; | ||
52 | |||
53 | /* Unmodified copy of the guest's TLB -- shared with host userspace. */ | ||
54 | struct kvm_book3e_206_tlb_entry *gtlb_arch; | ||
55 | |||
56 | /* Starting entry number in gtlb_arch[] */ | ||
57 | int gtlb_offset[E500_TLB_NUM]; | ||
58 | |||
59 | /* KVM internal information associated with each guest TLB entry */ | ||
60 | struct tlbe_priv *gtlb_priv[E500_TLB_NUM]; | ||
61 | |||
62 | struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM]; | ||
63 | |||
64 | unsigned int gtlb_nv[E500_TLB_NUM]; | ||
65 | |||
66 | /* | ||
67 | * information associated with each host TLB entry -- | ||
68 | * TLB1 only for now. If/when guest TLB1 entries can be | ||
69 | * mapped with host TLB0, this will be used for that too. | ||
70 | * | ||
71 | * We don't want to use this for guest TLB0 because then we'd | ||
72 | * have the overhead of doing the translation again even if | ||
73 | * the entry is still in the guest TLB (e.g. we swapped out | ||
74 | * and back, and our host TLB entries got evicted). | ||
75 | */ | ||
76 | struct tlbe_ref *tlb_refs[E500_TLB_NUM]; | ||
77 | unsigned int host_tlb1_nv; | ||
78 | |||
79 | u32 svr; | ||
80 | u32 l1csr0; | ||
81 | u32 l1csr1; | ||
82 | u32 hid0; | ||
83 | u32 hid1; | ||
84 | u64 mcar; | ||
85 | |||
86 | struct page **shared_tlb_pages; | ||
87 | int num_shared_tlb_pages; | ||
88 | |||
89 | u64 *g2h_tlb1_map; | ||
90 | unsigned int *h2g_tlb1_rmap; | ||
91 | |||
92 | /* Minimum and maximum address mapped by TLB1 */ | ||
93 | unsigned long tlb1_min_eaddr; | ||
94 | unsigned long tlb1_max_eaddr; | ||
95 | |||
96 | #ifdef CONFIG_KVM_E500V2 | ||
97 | u32 pid[E500_PID_NUM]; | ||
98 | |||
99 | /* vcpu id table */ | ||
100 | struct vcpu_id_table *idt; | ||
101 | #endif | ||
102 | }; | ||
103 | |||
104 | static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) | ||
105 | { | ||
106 | return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu); | ||
107 | } | ||
108 | |||
109 | |||
110 | /* This geometry is the legacy default -- can be overridden by userspace */ | ||
111 | #define KVM_E500_TLB0_WAY_SIZE 128 | ||
112 | #define KVM_E500_TLB0_WAY_NUM 2 | ||
113 | |||
114 | #define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM) | ||
115 | #define KVM_E500_TLB1_SIZE 16 | ||
116 | |||
117 | #define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF)) | ||
118 | #define tlbsel_of(index) ((index) >> 16) | ||
119 | #define esel_of(index) ((index) & 0xFFFF) | ||
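A quick worked example of the encoding above (the checks themselves are not in the patch):

	/* index_of() packs the TLB selector into the high half and the
	 * entry number into the low half of one int. */
	int index = index_of(1, 3);		/* 0x10003: TLB1, entry 3 */

	BUG_ON(tlbsel_of(index) != 1);
	BUG_ON(esel_of(index) != 3);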
120 | |||
121 | #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) | ||
122 | #define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) | ||
123 | #define MAS2_ATTRIB_MASK \ | ||
124 | (MAS2_X0 | MAS2_X1) | ||
125 | #define MAS3_ATTRIB_MASK \ | ||
126 | (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ | ||
127 | | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) | ||
128 | |||
129 | int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
130 | ulong value); | ||
131 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu); | ||
132 | int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu); | ||
133 | int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb); | ||
134 | int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb); | ||
135 | int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb); | ||
136 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500); | ||
137 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); | ||
138 | |||
139 | void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | ||
140 | int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | ||
141 | |||
142 | |||
143 | #ifdef CONFIG_KVM_E500V2 | ||
144 | unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
145 | unsigned int as, unsigned int gid, | ||
146 | unsigned int pr, int avoid_recursion); | ||
147 | #endif | ||
148 | |||
149 | /* TLB helper functions */ | ||
150 | static inline unsigned int | ||
151 | get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
152 | { | ||
153 | return (tlbe->mas1 >> 7) & 0x1f; | ||
154 | } | ||
155 | |||
156 | static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
157 | { | ||
158 | return tlbe->mas2 & 0xfffff000; | ||
159 | } | ||
160 | |||
161 | static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
162 | { | ||
163 | unsigned int pgsize = get_tlb_size(tlbe); | ||
164 | return 1ULL << 10 << pgsize; | ||
165 | } | ||
166 | |||
167 | static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
168 | { | ||
169 | u64 bytes = get_tlb_bytes(tlbe); | ||
170 | return get_tlb_eaddr(tlbe) + bytes - 1; | ||
171 | } | ||
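The size helpers encode a power-of-two page size in KB units: bytes = 2^(10 + tsize), so tsize 2 is 4 KiB and tsize 18 is 256 MiB. A quick check of the arithmetic (illustrative, not part of the patch):

	static void check_tlb_size_helpers(void)
	{
		struct kvm_book3e_206_tlb_entry e = {
			.mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M),
			.mas2 = 0x10000000,
		};

		/* tsize 18 -> 1 << 28 bytes, i.e. a 256 MiB page. */
		BUG_ON(get_tlb_bytes(&e) != 0x10000000ULL);
		BUG_ON(get_tlb_end(&e) != 0x1fffffffUL);
	}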
172 | |||
173 | static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
174 | { | ||
175 | return tlbe->mas7_3 & ~0xfffULL; | ||
176 | } | ||
177 | |||
178 | static inline unsigned int | ||
179 | get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
180 | { | ||
181 | return (tlbe->mas1 >> 16) & 0xff; | ||
182 | } | ||
183 | |||
184 | static inline unsigned int | ||
185 | get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
186 | { | ||
187 | return (tlbe->mas1 >> 12) & 0x1; | ||
188 | } | ||
189 | |||
190 | static inline unsigned int | ||
191 | get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
192 | { | ||
193 | return (tlbe->mas1 >> 31) & 0x1; | ||
194 | } | ||
195 | |||
196 | static inline unsigned int | ||
197 | get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
198 | { | ||
199 | return (tlbe->mas1 >> 30) & 0x1; | ||
200 | } | ||
201 | |||
202 | static inline unsigned int | ||
203 | get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
204 | { | ||
205 | return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; | ||
206 | } | ||
207 | |||
208 | static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu) | ||
209 | { | ||
210 | return vcpu->arch.pid & 0xff; | ||
211 | } | ||
212 | |||
213 | static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu) | ||
214 | { | ||
215 | return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS)); | ||
216 | } | ||
217 | |||
218 | static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu) | ||
219 | { | ||
220 | return !!(vcpu->arch.shared->msr & MSR_PR); | ||
221 | } | ||
222 | |||
223 | static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu) | ||
224 | { | ||
225 | return (vcpu->arch.shared->mas6 >> 16) & 0xff; | ||
226 | } | ||
227 | |||
228 | static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu) | ||
229 | { | ||
230 | return vcpu->arch.shared->mas6 & 0x1; | ||
231 | } | ||
232 | |||
233 | static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu) | ||
234 | { | ||
235 | /* | ||
236 | * The manual says that tlbsel is 2 bits wide. | ||
237 | * Since we only have two TLBs, only the lower bit is used. | ||
238 | */ | ||
239 | return (vcpu->arch.shared->mas0 >> 28) & 0x1; | ||
240 | } | ||
241 | |||
242 | static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu) | ||
243 | { | ||
244 | return vcpu->arch.shared->mas0 & 0xfff; | ||
245 | } | ||
246 | |||
247 | static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu) | ||
248 | { | ||
249 | return (vcpu->arch.shared->mas0 >> 16) & 0xfff; | ||
250 | } | ||
251 | |||
252 | static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | ||
253 | const struct kvm_book3e_206_tlb_entry *tlbe) | ||
254 | { | ||
255 | gpa_t gpa; | ||
256 | |||
257 | if (!get_tlb_v(tlbe)) | ||
258 | return 0; | ||
259 | |||
260 | #ifndef CONFIG_KVM_BOOKE_HV | ||
261 | /* Does it match current guest AS? */ | ||
262 | /* XXX what about IS != DS? */ | ||
263 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) | ||
264 | return 0; | ||
265 | #endif | ||
266 | |||
267 | gpa = get_tlb_raddr(tlbe); | ||
268 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) | ||
269 | /* Mapping is not for RAM. */ | ||
270 | return 0; | ||
271 | |||
272 | return 1; | ||
273 | } | ||
274 | |||
275 | static inline struct kvm_book3e_206_tlb_entry *get_entry( | ||
276 | struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry) | ||
277 | { | ||
278 | int offset = vcpu_e500->gtlb_offset[tlbsel]; | ||
279 | return &vcpu_e500->gtlb_arch[offset + entry]; | ||
280 | } | ||
281 | |||
282 | void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
283 | struct kvm_book3e_206_tlb_entry *gtlbe); | ||
284 | void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500); | ||
285 | |||
286 | #ifdef CONFIG_KVM_BOOKE_HV | ||
287 | #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe) | ||
288 | #define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu) | ||
289 | #define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS) | ||
290 | #else | ||
291 | unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, | ||
292 | struct kvm_book3e_206_tlb_entry *gtlbe); | ||
293 | |||
294 | static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu) | ||
295 | { | ||
296 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
297 | unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf; | ||
298 | |||
299 | return vcpu_e500->pid[tidseld]; | ||
300 | } | ||
301 | |||
302 | /* Force TS=1 for all guest mappings. */ | ||
303 | #define get_tlb_sts(gtlbe) (MAS1_TS) | ||
304 | #endif /* !BOOKE_HV */ | ||
305 | |||
306 | #endif /* KVM_E500_H */ | ||
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 6d0b2bd54fb0..8b99e076dc81 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -14,27 +14,96 @@ | |||
14 | 14 | ||
15 | #include <asm/kvm_ppc.h> | 15 | #include <asm/kvm_ppc.h> |
16 | #include <asm/disassemble.h> | 16 | #include <asm/disassemble.h> |
17 | #include <asm/kvm_e500.h> | 17 | #include <asm/dbell.h> |
18 | 18 | ||
19 | #include "booke.h" | 19 | #include "booke.h" |
20 | #include "e500_tlb.h" | 20 | #include "e500.h" |
21 | 21 | ||
22 | #define XOP_MSGSND 206 | ||
23 | #define XOP_MSGCLR 238 | ||
22 | #define XOP_TLBIVAX 786 | 24 | #define XOP_TLBIVAX 786 |
23 | #define XOP_TLBSX 914 | 25 | #define XOP_TLBSX 914 |
24 | #define XOP_TLBRE 946 | 26 | #define XOP_TLBRE 946 |
25 | #define XOP_TLBWE 978 | 27 | #define XOP_TLBWE 978 |
28 | #define XOP_TLBILX 18 | ||
29 | |||
30 | #ifdef CONFIG_KVM_E500MC | ||
31 | static int dbell2prio(ulong param) | ||
32 | { | ||
33 | int msg = param & PPC_DBELL_TYPE_MASK; | ||
34 | int prio = -1; | ||
35 | |||
36 | switch (msg) { | ||
37 | case PPC_DBELL_TYPE(PPC_DBELL): | ||
38 | prio = BOOKE_IRQPRIO_DBELL; | ||
39 | break; | ||
40 | case PPC_DBELL_TYPE(PPC_DBELL_CRIT): | ||
41 | prio = BOOKE_IRQPRIO_DBELL_CRIT; | ||
42 | break; | ||
43 | default: | ||
44 | break; | ||
45 | } | ||
46 | |||
47 | return prio; | ||
48 | } | ||
49 | |||
50 | static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) | ||
51 | { | ||
52 | ulong param = vcpu->arch.gpr[rb]; | ||
53 | int prio = dbell2prio(param); | ||
54 | |||
55 | if (prio < 0) | ||
56 | return EMULATE_FAIL; | ||
57 | |||
58 | clear_bit(prio, &vcpu->arch.pending_exceptions); | ||
59 | return EMULATE_DONE; | ||
60 | } | ||
61 | |||
62 | static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) | ||
63 | { | ||
64 | ulong param = vcpu->arch.gpr[rb]; | ||
65 | int prio = dbell2prio(param); | ||
66 | int pir = param & PPC_DBELL_PIR_MASK; | ||
67 | int i; | ||
68 | struct kvm_vcpu *cvcpu; | ||
69 | |||
70 | if (prio < 0) | ||
71 | return EMULATE_FAIL; | ||
72 | |||
73 | kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) { | ||
74 | int cpir = cvcpu->arch.shared->pir; | ||
75 | if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) { | ||
76 | set_bit(prio, &cvcpu->arch.pending_exceptions); | ||
77 | kvm_vcpu_kick(cvcpu); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | return EMULATE_DONE; | ||
82 | } | ||
83 | #endif | ||
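The msgsnd/msgclr emulation above keys off the doorbell message type encoded in the rB operand value. A small illustration using the constants already referenced above (the check itself is not in the patch):

	/* A critical-doorbell broadcast maps to BOOKE_IRQPRIO_DBELL_CRIT;
	 * a plain doorbell maps to BOOKE_IRQPRIO_DBELL; anything else
	 * makes the emulation fail. */
	ulong param = PPC_DBELL_TYPE(PPC_DBELL_CRIT) | PPC_DBELL_MSG_BRDCAST;

	BUG_ON(dbell2prio(param) != BOOKE_IRQPRIO_DBELL_CRIT);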
26 | 84 | ||
27 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 85 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, |
28 | unsigned int inst, int *advance) | 86 | unsigned int inst, int *advance) |
29 | { | 87 | { |
30 | int emulated = EMULATE_DONE; | 88 | int emulated = EMULATE_DONE; |
31 | int ra; | 89 | int ra = get_ra(inst); |
32 | int rb; | 90 | int rb = get_rb(inst); |
91 | int rt = get_rt(inst); | ||
33 | 92 | ||
34 | switch (get_op(inst)) { | 93 | switch (get_op(inst)) { |
35 | case 31: | 94 | case 31: |
36 | switch (get_xop(inst)) { | 95 | switch (get_xop(inst)) { |
37 | 96 | ||
97 | #ifdef CONFIG_KVM_E500MC | ||
98 | case XOP_MSGSND: | ||
99 | emulated = kvmppc_e500_emul_msgsnd(vcpu, rb); | ||
100 | break; | ||
101 | |||
102 | case XOP_MSGCLR: | ||
103 | emulated = kvmppc_e500_emul_msgclr(vcpu, rb); | ||
104 | break; | ||
105 | #endif | ||
106 | |||
38 | case XOP_TLBRE: | 107 | case XOP_TLBRE: |
39 | emulated = kvmppc_e500_emul_tlbre(vcpu); | 108 | emulated = kvmppc_e500_emul_tlbre(vcpu); |
40 | break; | 109 | break; |
@@ -44,13 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
44 | break; | 113 | break; |
45 | 114 | ||
46 | case XOP_TLBSX: | 115 | case XOP_TLBSX: |
47 | rb = get_rb(inst); | ||
48 | emulated = kvmppc_e500_emul_tlbsx(vcpu,rb); | 116 | emulated = kvmppc_e500_emul_tlbsx(vcpu,rb); |
49 | break; | 117 | break; |
50 | 118 | ||
119 | case XOP_TLBILX: | ||
120 | emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb); | ||
121 | break; | ||
122 | |||
51 | case XOP_TLBIVAX: | 123 | case XOP_TLBIVAX: |
52 | ra = get_ra(inst); | ||
53 | rb = get_rb(inst); | ||
54 | emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb); | 124 | emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb); |
55 | break; | 125 | break; |
56 | 126 | ||
@@ -70,52 +140,63 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
70 | return emulated; | 140 | return emulated; |
71 | } | 141 | } |
72 | 142 | ||
73 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 143 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
74 | { | 144 | { |
75 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 145 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
76 | int emulated = EMULATE_DONE; | 146 | int emulated = EMULATE_DONE; |
77 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
78 | 147 | ||
79 | switch (sprn) { | 148 | switch (sprn) { |
149 | #ifndef CONFIG_KVM_BOOKE_HV | ||
80 | case SPRN_PID: | 150 | case SPRN_PID: |
81 | kvmppc_set_pid(vcpu, spr_val); | 151 | kvmppc_set_pid(vcpu, spr_val); |
82 | break; | 152 | break; |
83 | case SPRN_PID1: | 153 | case SPRN_PID1: |
84 | if (spr_val != 0) | 154 | if (spr_val != 0) |
85 | return EMULATE_FAIL; | 155 | return EMULATE_FAIL; |
86 | vcpu_e500->pid[1] = spr_val; break; | 156 | vcpu_e500->pid[1] = spr_val; |
157 | break; | ||
87 | case SPRN_PID2: | 158 | case SPRN_PID2: |
88 | if (spr_val != 0) | 159 | if (spr_val != 0) |
89 | return EMULATE_FAIL; | 160 | return EMULATE_FAIL; |
90 | vcpu_e500->pid[2] = spr_val; break; | 161 | vcpu_e500->pid[2] = spr_val; |
162 | break; | ||
91 | case SPRN_MAS0: | 163 | case SPRN_MAS0: |
92 | vcpu->arch.shared->mas0 = spr_val; break; | 164 | vcpu->arch.shared->mas0 = spr_val; |
165 | break; | ||
93 | case SPRN_MAS1: | 166 | case SPRN_MAS1: |
94 | vcpu->arch.shared->mas1 = spr_val; break; | 167 | vcpu->arch.shared->mas1 = spr_val; |
168 | break; | ||
95 | case SPRN_MAS2: | 169 | case SPRN_MAS2: |
96 | vcpu->arch.shared->mas2 = spr_val; break; | 170 | vcpu->arch.shared->mas2 = spr_val; |
171 | break; | ||
97 | case SPRN_MAS3: | 172 | case SPRN_MAS3: |
98 | vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; | 173 | vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; |
99 | vcpu->arch.shared->mas7_3 |= spr_val; | 174 | vcpu->arch.shared->mas7_3 |= spr_val; |
100 | break; | 175 | break; |
101 | case SPRN_MAS4: | 176 | case SPRN_MAS4: |
102 | vcpu->arch.shared->mas4 = spr_val; break; | 177 | vcpu->arch.shared->mas4 = spr_val; |
178 | break; | ||
103 | case SPRN_MAS6: | 179 | case SPRN_MAS6: |
104 | vcpu->arch.shared->mas6 = spr_val; break; | 180 | vcpu->arch.shared->mas6 = spr_val; |
181 | break; | ||
105 | case SPRN_MAS7: | 182 | case SPRN_MAS7: |
106 | vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; | 183 | vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; |
107 | vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; | 184 | vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; |
108 | break; | 185 | break; |
186 | #endif | ||
109 | case SPRN_L1CSR0: | 187 | case SPRN_L1CSR0: |
110 | vcpu_e500->l1csr0 = spr_val; | 188 | vcpu_e500->l1csr0 = spr_val; |
111 | vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); | 189 | vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); |
112 | break; | 190 | break; |
113 | case SPRN_L1CSR1: | 191 | case SPRN_L1CSR1: |
114 | vcpu_e500->l1csr1 = spr_val; break; | 192 | vcpu_e500->l1csr1 = spr_val; |
193 | break; | ||
115 | case SPRN_HID0: | 194 | case SPRN_HID0: |
116 | vcpu_e500->hid0 = spr_val; break; | 195 | vcpu_e500->hid0 = spr_val; |
196 | break; | ||
117 | case SPRN_HID1: | 197 | case SPRN_HID1: |
118 | vcpu_e500->hid1 = spr_val; break; | 198 | vcpu_e500->hid1 = spr_val; |
199 | break; | ||
119 | 200 | ||
120 | case SPRN_MMUCSR0: | 201 | case SPRN_MMUCSR0: |
121 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, | 202 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, |
@@ -135,81 +216,112 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
135 | case SPRN_IVOR35: | 216 | case SPRN_IVOR35: |
136 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; | 217 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; |
137 | break; | 218 | break; |
138 | 219 | #ifdef CONFIG_KVM_BOOKE_HV | |
220 | case SPRN_IVOR36: | ||
221 | vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val; | ||
222 | break; | ||
223 | case SPRN_IVOR37: | ||
224 | vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val; | ||
225 | break; | ||
226 | #endif | ||
139 | default: | 227 | default: |
140 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); | 228 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val); |
141 | } | 229 | } |
142 | 230 | ||
143 | return emulated; | 231 | return emulated; |
144 | } | 232 | } |
145 | 233 | ||
146 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 234 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
147 | { | 235 | { |
148 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 236 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
149 | int emulated = EMULATE_DONE; | 237 | int emulated = EMULATE_DONE; |
150 | unsigned long val; | ||
151 | 238 | ||
152 | switch (sprn) { | 239 | switch (sprn) { |
240 | #ifndef CONFIG_KVM_BOOKE_HV | ||
153 | case SPRN_PID: | 241 | case SPRN_PID: |
154 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break; | 242 | *spr_val = vcpu_e500->pid[0]; |
243 | break; | ||
155 | case SPRN_PID1: | 244 | case SPRN_PID1: |
156 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break; | 245 | *spr_val = vcpu_e500->pid[1]; |
246 | break; | ||
157 | case SPRN_PID2: | 247 | case SPRN_PID2: |
158 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; | 248 | *spr_val = vcpu_e500->pid[2]; |
249 | break; | ||
159 | case SPRN_MAS0: | 250 | case SPRN_MAS0: |
160 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break; | 251 | *spr_val = vcpu->arch.shared->mas0; |
252 | break; | ||
161 | case SPRN_MAS1: | 253 | case SPRN_MAS1: |
162 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break; | 254 | *spr_val = vcpu->arch.shared->mas1; |
255 | break; | ||
163 | case SPRN_MAS2: | 256 | case SPRN_MAS2: |
164 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break; | 257 | *spr_val = vcpu->arch.shared->mas2; |
258 | break; | ||
165 | case SPRN_MAS3: | 259 | case SPRN_MAS3: |
166 | val = (u32)vcpu->arch.shared->mas7_3; | 260 | *spr_val = (u32)vcpu->arch.shared->mas7_3; |
167 | kvmppc_set_gpr(vcpu, rt, val); | ||
168 | break; | 261 | break; |
169 | case SPRN_MAS4: | 262 | case SPRN_MAS4: |
170 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break; | 263 | *spr_val = vcpu->arch.shared->mas4; |
264 | break; | ||
171 | case SPRN_MAS6: | 265 | case SPRN_MAS6: |
172 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break; | 266 | *spr_val = vcpu->arch.shared->mas6; |
267 | break; | ||
173 | case SPRN_MAS7: | 268 | case SPRN_MAS7: |
174 | val = vcpu->arch.shared->mas7_3 >> 32; | 269 | *spr_val = vcpu->arch.shared->mas7_3 >> 32; |
175 | kvmppc_set_gpr(vcpu, rt, val); | ||
176 | break; | 270 | break; |
271 | #endif | ||
177 | case SPRN_TLB0CFG: | 272 | case SPRN_TLB0CFG: |
178 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; | 273 | *spr_val = vcpu->arch.tlbcfg[0]; |
274 | break; | ||
179 | case SPRN_TLB1CFG: | 275 | case SPRN_TLB1CFG: |
180 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break; | 276 | *spr_val = vcpu->arch.tlbcfg[1]; |
277 | break; | ||
181 | case SPRN_L1CSR0: | 278 | case SPRN_L1CSR0: |
182 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; | 279 | *spr_val = vcpu_e500->l1csr0; |
280 | break; | ||
183 | case SPRN_L1CSR1: | 281 | case SPRN_L1CSR1: |
184 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; | 282 | *spr_val = vcpu_e500->l1csr1; |
283 | break; | ||
185 | case SPRN_HID0: | 284 | case SPRN_HID0: |
186 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; | 285 | *spr_val = vcpu_e500->hid0; |
286 | break; | ||
187 | case SPRN_HID1: | 287 | case SPRN_HID1: |
188 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; | 288 | *spr_val = vcpu_e500->hid1; |
289 | break; | ||
189 | case SPRN_SVR: | 290 | case SPRN_SVR: |
190 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break; | 291 | *spr_val = vcpu_e500->svr; |
292 | break; | ||
191 | 293 | ||
192 | case SPRN_MMUCSR0: | 294 | case SPRN_MMUCSR0: |
193 | kvmppc_set_gpr(vcpu, rt, 0); break; | 295 | *spr_val = 0; |
296 | break; | ||
194 | 297 | ||
195 | case SPRN_MMUCFG: | 298 | case SPRN_MMUCFG: |
196 | kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break; | 299 | *spr_val = vcpu->arch.mmucfg; |
300 | break; | ||
197 | 301 | ||
198 | /* extra exceptions */ | 302 | /* extra exceptions */ |
199 | case SPRN_IVOR32: | 303 | case SPRN_IVOR32: |
200 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]); | 304 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; |
201 | break; | 305 | break; |
202 | case SPRN_IVOR33: | 306 | case SPRN_IVOR33: |
203 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]); | 307 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; |
204 | break; | 308 | break; |
205 | case SPRN_IVOR34: | 309 | case SPRN_IVOR34: |
206 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]); | 310 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; |
207 | break; | 311 | break; |
208 | case SPRN_IVOR35: | 312 | case SPRN_IVOR35: |
209 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]); | 313 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; |
314 | break; | ||
315 | #ifdef CONFIG_KVM_BOOKE_HV | ||
316 | case SPRN_IVOR36: | ||
317 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; | ||
318 | break; | ||
319 | case SPRN_IVOR37: | ||
320 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; | ||
210 | break; | 321 | break; |
322 | #endif | ||
211 | default: | 323 | default: |
212 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); | 324 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val); |
213 | } | 325 | } |
214 | 326 | ||
215 | return emulated; | 327 | return emulated; |
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 6e53e4164de1..c510fc961302 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c | |||
@@ -2,6 +2,9 @@ | |||
2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. | 2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * Author: Yu Liu, yu.liu@freescale.com | 4 | * Author: Yu Liu, yu.liu@freescale.com |
5 | * Scott Wood, scottwood@freescale.com | ||
6 | * Ashish Kalra, ashish.kalra@freescale.com | ||
7 | * Varun Sethi, varun.sethi@freescale.com | ||
5 | * | 8 | * |
6 | * Description: | 9 | * Description: |
7 | * This file is based on arch/powerpc/kvm/44x_tlb.c, | 10 | * This file is based on arch/powerpc/kvm/44x_tlb.c, |
@@ -26,210 +29,15 @@ | |||
26 | #include <linux/vmalloc.h> | 29 | #include <linux/vmalloc.h> |
27 | #include <linux/hugetlb.h> | 30 | #include <linux/hugetlb.h> |
28 | #include <asm/kvm_ppc.h> | 31 | #include <asm/kvm_ppc.h> |
29 | #include <asm/kvm_e500.h> | ||
30 | 32 | ||
31 | #include "../mm/mmu_decl.h" | 33 | #include "e500.h" |
32 | #include "e500_tlb.h" | ||
33 | #include "trace.h" | 34 | #include "trace.h" |
34 | #include "timing.h" | 35 | #include "timing.h" |
35 | 36 | ||
36 | #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) | 37 | #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) |
37 | 38 | ||
38 | struct id { | ||
39 | unsigned long val; | ||
40 | struct id **pentry; | ||
41 | }; | ||
42 | |||
43 | #define NUM_TIDS 256 | ||
44 | |||
45 | /* | ||
46 | * This table provide mappings from: | ||
47 | * (guestAS,guestTID,guestPR) --> ID of physical cpu | ||
48 | * guestAS [0..1] | ||
49 | * guestTID [0..255] | ||
50 | * guestPR [0..1] | ||
51 | * ID [1..255] | ||
52 | * Each vcpu keeps one vcpu_id_table. | ||
53 | */ | ||
54 | struct vcpu_id_table { | ||
55 | struct id id[2][NUM_TIDS][2]; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * This table provide reversed mappings of vcpu_id_table: | ||
60 | * ID --> address of vcpu_id_table item. | ||
61 | * Each physical core has one pcpu_id_table. | ||
62 | */ | ||
63 | struct pcpu_id_table { | ||
64 | struct id *entry[NUM_TIDS]; | ||
65 | }; | ||
66 | |||
67 | static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids); | ||
68 | |||
69 | /* This variable keeps last used shadow ID on local core. | ||
70 | * The valid range of shadow ID is [1..255] */ | ||
71 | static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid); | ||
72 | |||
73 | static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; | 39 | static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; |
74 | 40 | ||
75 | static struct kvm_book3e_206_tlb_entry *get_entry( | ||
76 | struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry) | ||
77 | { | ||
78 | int offset = vcpu_e500->gtlb_offset[tlbsel]; | ||
79 | return &vcpu_e500->gtlb_arch[offset + entry]; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Allocate a free shadow id and setup a valid sid mapping in given entry. | ||
84 | * A mapping is only valid when vcpu_id_table and pcpu_id_table are match. | ||
85 | * | ||
86 | * The caller must have preemption disabled, and keep it that way until | ||
87 | * it has finished with the returned shadow id (either written into the | ||
88 | * TLB or arch.shadow_pid, or discarded). | ||
89 | */ | ||
90 | static inline int local_sid_setup_one(struct id *entry) | ||
91 | { | ||
92 | unsigned long sid; | ||
93 | int ret = -1; | ||
94 | |||
95 | sid = ++(__get_cpu_var(pcpu_last_used_sid)); | ||
96 | if (sid < NUM_TIDS) { | ||
97 | __get_cpu_var(pcpu_sids).entry[sid] = entry; | ||
98 | entry->val = sid; | ||
99 | entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; | ||
100 | ret = sid; | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * If sid == NUM_TIDS, we've run out of sids. We return -1, and | ||
105 | * the caller will invalidate everything and start over. | ||
106 | * | ||
107 | * sid > NUM_TIDS indicates a race, which we disable preemption to | ||
108 | * avoid. | ||
109 | */ | ||
110 | WARN_ON(sid > NUM_TIDS); | ||
111 | |||
112 | return ret; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Check if given entry contain a valid shadow id mapping. | ||
117 | * An ID mapping is considered valid only if | ||
118 | * both vcpu and pcpu know this mapping. | ||
119 | * | ||
120 | * The caller must have preemption disabled, and keep it that way until | ||
121 | * it has finished with the returned shadow id (either written into the | ||
122 | * TLB or arch.shadow_pid, or discarded). | ||
123 | */ | ||
124 | static inline int local_sid_lookup(struct id *entry) | ||
125 | { | ||
126 | if (entry && entry->val != 0 && | ||
127 | __get_cpu_var(pcpu_sids).entry[entry->val] == entry && | ||
128 | entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) | ||
129 | return entry->val; | ||
130 | return -1; | ||
131 | } | ||
132 | |||
133 | /* Invalidate all id mappings on local core -- call with preempt disabled */ | ||
134 | static inline void local_sid_destroy_all(void) | ||
135 | { | ||
136 | __get_cpu_var(pcpu_last_used_sid) = 0; | ||
137 | memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); | ||
138 | } | ||
139 | |||
140 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
141 | { | ||
142 | vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL); | ||
143 | return vcpu_e500->idt; | ||
144 | } | ||
145 | |||
146 | static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
147 | { | ||
148 | kfree(vcpu_e500->idt); | ||
149 | } | ||
150 | |||
151 | /* Invalidate all mappings on vcpu */ | ||
152 | static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
153 | { | ||
154 | memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table)); | ||
155 | |||
156 | /* Update shadow pid when mappings are changed */ | ||
157 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
158 | } | ||
159 | |||
160 | /* Invalidate one ID mapping on vcpu */ | ||
161 | static inline void kvmppc_e500_id_table_reset_one( | ||
162 | struct kvmppc_vcpu_e500 *vcpu_e500, | ||
163 | int as, int pid, int pr) | ||
164 | { | ||
165 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
166 | |||
167 | BUG_ON(as >= 2); | ||
168 | BUG_ON(pid >= NUM_TIDS); | ||
169 | BUG_ON(pr >= 2); | ||
170 | |||
171 | idt->id[as][pid][pr].val = 0; | ||
172 | idt->id[as][pid][pr].pentry = NULL; | ||
173 | |||
174 | /* Update shadow pid when mappings are changed */ | ||
175 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Map guest (vcpu,AS,ID,PR) to physical core shadow id. | ||
180 | * This function first lookup if a valid mapping exists, | ||
181 | * if not, then creates a new one. | ||
182 | * | ||
183 | * The caller must have preemption disabled, and keep it that way until | ||
184 | * it has finished with the returned shadow id (either written into the | ||
185 | * TLB or arch.shadow_pid, or discarded). | ||
186 | */ | ||
187 | static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
188 | unsigned int as, unsigned int gid, | ||
189 | unsigned int pr, int avoid_recursion) | ||
190 | { | ||
191 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
192 | int sid; | ||
193 | |||
194 | BUG_ON(as >= 2); | ||
195 | BUG_ON(gid >= NUM_TIDS); | ||
196 | BUG_ON(pr >= 2); | ||
197 | |||
198 | sid = local_sid_lookup(&idt->id[as][gid][pr]); | ||
199 | |||
200 | while (sid <= 0) { | ||
201 | /* No mapping yet */ | ||
202 | sid = local_sid_setup_one(&idt->id[as][gid][pr]); | ||
203 | if (sid <= 0) { | ||
204 | _tlbil_all(); | ||
205 | local_sid_destroy_all(); | ||
206 | } | ||
207 | |||
208 | /* Update shadow pid when mappings are changed */ | ||
209 | if (!avoid_recursion) | ||
210 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
211 | } | ||
212 | |||
213 | return sid; | ||
214 | } | ||
215 | |||
216 | /* Map guest pid to shadow. | ||
217 | * We use PID to keep shadow of current guest non-zero PID, | ||
218 | * and use PID1 to keep shadow of guest zero PID. | ||
219 | * So that guest tlbe with TID=0 can be accessed at any time */ | ||
220 | void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
221 | { | ||
222 | preempt_disable(); | ||
223 | vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, | ||
224 | get_cur_as(&vcpu_e500->vcpu), | ||
225 | get_cur_pid(&vcpu_e500->vcpu), | ||
226 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
227 | vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, | ||
228 | get_cur_as(&vcpu_e500->vcpu), 0, | ||
229 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
230 | preempt_enable(); | ||
231 | } | ||
232 | |||
233 | static inline unsigned int gtlb0_get_next_victim( | 41 | static inline unsigned int gtlb0_get_next_victim( |
234 | struct kvmppc_vcpu_e500 *vcpu_e500) | 42 | struct kvmppc_vcpu_e500 *vcpu_e500) |
235 | { | 43 | { |
@@ -258,6 +66,7 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) | |||
258 | /* Mask off reserved bits. */ | 66 | /* Mask off reserved bits. */ |
259 | mas3 &= MAS3_ATTRIB_MASK; | 67 | mas3 &= MAS3_ATTRIB_MASK; |
260 | 68 | ||
69 | #ifndef CONFIG_KVM_BOOKE_HV | ||
261 | if (!usermode) { | 70 | if (!usermode) { |
262 | /* Guest is in supervisor mode, | 71 | /* Guest is in supervisor mode, |
263 | * so we need to translate guest | 72 | * so we need to translate guest |
@@ -265,8 +74,9 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) | |||
265 | mas3 &= ~E500_TLB_USER_PERM_MASK; | 74 | mas3 &= ~E500_TLB_USER_PERM_MASK; |
266 | mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; | 75 | mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; |
267 | } | 76 | } |
268 | 77 | mas3 |= E500_TLB_SUPER_PERM_MASK; | |
269 | return mas3 | E500_TLB_SUPER_PERM_MASK; | 78 | #endif |
79 | return mas3; | ||
270 | } | 80 | } |
271 | 81 | ||
272 | static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) | 82 | static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) |
@@ -292,7 +102,16 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, | |||
292 | mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); | 102 | mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); |
293 | mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); | 103 | mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); |
294 | mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); | 104 | mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); |
105 | #ifdef CONFIG_KVM_BOOKE_HV | ||
106 | mtspr(SPRN_MAS8, stlbe->mas8); | ||
107 | #endif | ||
295 | asm volatile("isync; tlbwe" : : : "memory"); | 108 | asm volatile("isync; tlbwe" : : : "memory"); |
109 | |||
110 | #ifdef CONFIG_KVM_BOOKE_HV | ||
111 | /* Must clear mas8 for other host tlbwe's */ | ||
112 | mtspr(SPRN_MAS8, 0); | ||
113 | isync(); | ||
114 | #endif | ||
296 | local_irq_restore(flags); | 115 | local_irq_restore(flags); |
297 | 116 | ||
298 | trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, | 117 | trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, |
@@ -337,6 +156,7 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
337 | } | 156 | } |
338 | } | 157 | } |
339 | 158 | ||
159 | #ifdef CONFIG_KVM_E500V2 | ||
340 | void kvmppc_map_magic(struct kvm_vcpu *vcpu) | 160 | void kvmppc_map_magic(struct kvm_vcpu *vcpu) |
341 | { | 161 | { |
342 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 162 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -361,75 +181,41 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu) | |||
361 | __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); | 181 | __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); |
362 | preempt_enable(); | 182 | preempt_enable(); |
363 | } | 183 | } |
364 | 184 | #endif | |
365 | void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu) | ||
366 | { | ||
367 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
368 | |||
369 | /* Shadow PID may be expired on local core */ | ||
370 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
371 | } | ||
372 | |||
373 | void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu) | ||
374 | { | ||
375 | } | ||
376 | 185 | ||
377 | static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, | 186 | static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, |
378 | int tlbsel, int esel) | 187 | int tlbsel, int esel) |
379 | { | 188 | { |
380 | struct kvm_book3e_206_tlb_entry *gtlbe = | 189 | struct kvm_book3e_206_tlb_entry *gtlbe = |
381 | get_entry(vcpu_e500, tlbsel, esel); | 190 | get_entry(vcpu_e500, tlbsel, esel); |
382 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
383 | unsigned int pr, tid, ts, pid; | ||
384 | u32 val, eaddr; | ||
385 | unsigned long flags; | ||
386 | |||
387 | ts = get_tlb_ts(gtlbe); | ||
388 | tid = get_tlb_tid(gtlbe); | ||
389 | |||
390 | preempt_disable(); | ||
391 | |||
392 | /* One guest ID may be mapped to two shadow IDs */ | ||
393 | for (pr = 0; pr < 2; pr++) { | ||
394 | /* | ||
395 | * The shadow PID can have a valid mapping on at most one | ||
396 | * host CPU. In the common case, it will be valid on this | ||
397 | * CPU, in which case (for TLB0) we do a local invalidation | ||
398 | * of the specific address. | ||
399 | * | ||
400 | * If the shadow PID is not valid on the current host CPU, or | ||
401 | * if we're invalidating a TLB1 entry, we invalidate the | ||
402 | * entire shadow PID. | ||
403 | */ | ||
404 | if (tlbsel == 1 || | ||
405 | (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) { | ||
406 | kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr); | ||
407 | continue; | ||
408 | } | ||
409 | 191 | ||
410 | /* | 192 | if (tlbsel == 1 && |
411 | * The guest is invalidating a TLB0 entry which is in a PID | 193 | vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) { |
412 | * that has a valid shadow mapping on this host CPU. We | 194 | u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; |
413 | * search host TLB0 to invalidate its shadow TLB entry, | 195 | int hw_tlb_indx; |
414 | * similar to __tlbil_va except that we need to look in AS1. | 196 | unsigned long flags; |
415 | */ | ||
416 | val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS; | ||
417 | eaddr = get_tlb_eaddr(gtlbe); | ||
418 | 197 | ||
419 | local_irq_save(flags); | 198 | local_irq_save(flags); |
420 | 199 | while (tmp) { | |
421 | mtspr(SPRN_MAS6, val); | 200 | hw_tlb_indx = __ilog2_u64(tmp & -tmp); |
422 | asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr)); | 201 | mtspr(SPRN_MAS0, |
423 | val = mfspr(SPRN_MAS1); | 202 | MAS0_TLBSEL(1) | |
424 | if (val & MAS1_VALID) { | 203 | MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); |
425 | mtspr(SPRN_MAS1, val & ~MAS1_VALID); | 204 | mtspr(SPRN_MAS1, 0); |
426 | asm volatile("tlbwe"); | 205 | asm volatile("tlbwe"); |
206 | vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; | ||
207 | tmp &= tmp - 1; | ||
427 | } | 208 | } |
428 | 209 | mb(); | |
210 | vcpu_e500->g2h_tlb1_map[esel] = 0; | ||
211 | vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP; | ||
429 | local_irq_restore(flags); | 212 | local_irq_restore(flags); |
213 | |||
214 | return; | ||
430 | } | 215 | } |
431 | 216 | ||
432 | preempt_enable(); | 217 | /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ |
218 | kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); | ||
433 | } | 219 | } |
434 | 220 | ||
435 | static int tlb0_set_base(gva_t addr, int sets, int ways) | 221 | static int tlb0_set_base(gva_t addr, int sets, int ways) |
@@ -475,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
475 | set_base = gtlb0_set_base(vcpu_e500, eaddr); | 261 | set_base = gtlb0_set_base(vcpu_e500, eaddr); |
476 | size = vcpu_e500->gtlb_params[0].ways; | 262 | size = vcpu_e500->gtlb_params[0].ways; |
477 | } else { | 263 | } else { |
264 | if (eaddr < vcpu_e500->tlb1_min_eaddr || | ||
265 | eaddr > vcpu_e500->tlb1_max_eaddr) | ||
266 | return -1; | ||
478 | set_base = 0; | 267 | set_base = 0; |
479 | } | 268 | } |
480 | 269 | ||
@@ -530,6 +319,16 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) | |||
530 | } | 319 | } |
531 | } | 320 | } |
532 | 321 | ||
322 | static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
323 | { | ||
324 | if (vcpu_e500->g2h_tlb1_map) | ||
325 | memset(vcpu_e500->g2h_tlb1_map, 0, | ||
326 | sizeof(u64) * vcpu_e500->gtlb_params[1].entries); | ||
327 | if (vcpu_e500->h2g_tlb1_rmap) | ||
328 | memset(vcpu_e500->h2g_tlb1_rmap, 0, | ||
329 | sizeof(unsigned int) * host_tlb_params[1].entries); | ||
330 | } | ||
331 | |||
533 | static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) | 332 | static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) |
534 | { | 333 | { |
535 | int tlbsel = 0; | 334 | int tlbsel = 0; |
@@ -547,7 +346,7 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
547 | int stlbsel = 1; | 346 | int stlbsel = 1; |
548 | int i; | 347 | int i; |
549 | 348 | ||
550 | kvmppc_e500_id_table_reset_all(vcpu_e500); | 349 | kvmppc_e500_tlbil_all(vcpu_e500); |
551 | 350 | ||
552 | for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { | 351 | for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { |
553 | struct tlbe_ref *ref = | 352 | struct tlbe_ref *ref = |
@@ -562,19 +361,18 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, | |||
562 | unsigned int eaddr, int as) | 361 | unsigned int eaddr, int as) |
563 | { | 362 | { |
564 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 363 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
565 | unsigned int victim, pidsel, tsized; | 364 | unsigned int victim, tsized; |
566 | int tlbsel; | 365 | int tlbsel; |
567 | 366 | ||
568 | /* since we only have two TLBs, only lower bit is used. */ | 367 | /* since we only have two TLBs, only lower bit is used. */ |
569 | tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; | 368 | tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; |
570 | victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; | 369 | victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; |
571 | pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf; | ||
572 | tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; | 370 | tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; |
573 | 371 | ||
574 | vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) | 372 | vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) |
575 | | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); | 373 | | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); |
576 | vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) | 374 | vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) |
577 | | MAS1_TID(vcpu_e500->pid[pidsel]) | 375 | | MAS1_TID(get_tlbmiss_tid(vcpu)) |
578 | | MAS1_TSIZE(tsized); | 376 | | MAS1_TSIZE(tsized); |
579 | vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) | 377 | vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) |
580 | | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); | 378 | | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); |
@@ -586,23 +384,26 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, | |||
586 | 384 | ||
587 | /* TID must be supplied by the caller */ | 385 | /* TID must be supplied by the caller */ |
588 | static inline void kvmppc_e500_setup_stlbe( | 386 | static inline void kvmppc_e500_setup_stlbe( |
589 | struct kvmppc_vcpu_e500 *vcpu_e500, | 387 | struct kvm_vcpu *vcpu, |
590 | struct kvm_book3e_206_tlb_entry *gtlbe, | 388 | struct kvm_book3e_206_tlb_entry *gtlbe, |
591 | int tsize, struct tlbe_ref *ref, u64 gvaddr, | 389 | int tsize, struct tlbe_ref *ref, u64 gvaddr, |
592 | struct kvm_book3e_206_tlb_entry *stlbe) | 390 | struct kvm_book3e_206_tlb_entry *stlbe) |
593 | { | 391 | { |
594 | pfn_t pfn = ref->pfn; | 392 | pfn_t pfn = ref->pfn; |
393 | u32 pr = vcpu->arch.shared->msr & MSR_PR; | ||
595 | 394 | ||
596 | BUG_ON(!(ref->flags & E500_TLB_VALID)); | 395 | BUG_ON(!(ref->flags & E500_TLB_VALID)); |
597 | 396 | ||
598 | /* Force TS=1 IPROT=0 for all guest mappings. */ | 397 | /* Force IPROT=0 for all guest mappings. */ |
599 | stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID; | 398 | stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; |
600 | stlbe->mas2 = (gvaddr & MAS2_EPN) | 399 | stlbe->mas2 = (gvaddr & MAS2_EPN) | |
601 | | e500_shadow_mas2_attrib(gtlbe->mas2, | 400 | e500_shadow_mas2_attrib(gtlbe->mas2, pr); |
602 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); | 401 | stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | |
603 | stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | 402 | e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); |
604 | | e500_shadow_mas3_attrib(gtlbe->mas7_3, | 403 | |
605 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); | 404 | #ifdef CONFIG_KVM_BOOKE_HV |
405 | stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid; | ||
406 | #endif | ||
606 | } | 407 | } |
607 | 408 | ||
608 | static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | 409 | static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, |
@@ -736,7 +537,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
736 | kvmppc_e500_ref_release(ref); | 537 | kvmppc_e500_ref_release(ref); |
737 | kvmppc_e500_ref_setup(ref, gtlbe, pfn); | 538 | kvmppc_e500_ref_setup(ref, gtlbe, pfn); |
738 | 539 | ||
739 | kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe); | 540 | kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, |
541 | ref, gvaddr, stlbe); | ||
740 | } | 542 | } |
741 | 543 | ||
742 | /* XXX only map the one-one case, for now use TLB0 */ | 544 | /* XXX only map the one-one case, for now use TLB0 */ |
@@ -760,7 +562,7 @@ static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
760 | /* XXX for both one-one and one-to-many, for now use TLB1 */ | 562 | /* XXX for both one-one and one-to-many, for now use TLB1 */ |
761 | static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, | 563 | static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, |
762 | u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, | 564 | u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, |
763 | struct kvm_book3e_206_tlb_entry *stlbe) | 565 | struct kvm_book3e_206_tlb_entry *stlbe, int esel) |
764 | { | 566 | { |
765 | struct tlbe_ref *ref; | 567 | struct tlbe_ref *ref; |
766 | unsigned int victim; | 568 | unsigned int victim; |
@@ -773,15 +575,74 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
773 | ref = &vcpu_e500->tlb_refs[1][victim]; | 575 | ref = &vcpu_e500->tlb_refs[1][victim]; |
774 | kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); | 576 | kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); |
775 | 577 | ||
578 | vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim; | ||
579 | vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; | ||
580 | if (vcpu_e500->h2g_tlb1_rmap[victim]) { | ||
581 | unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim]; | ||
582 | vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim); | ||
583 | } | ||
584 | vcpu_e500->h2g_tlb1_rmap[victim] = esel; | ||
585 | |||
776 | return victim; | 586 | return victim; |
777 | } | 587 | } |
778 | 588 | ||
779 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | 589 | static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) |
590 | { | ||
591 | int size = vcpu_e500->gtlb_params[1].entries; | ||
592 | unsigned int offset; | ||
593 | gva_t eaddr; | ||
594 | int i; | ||
595 | |||
596 | vcpu_e500->tlb1_min_eaddr = ~0UL; | ||
597 | vcpu_e500->tlb1_max_eaddr = 0; | ||
598 | offset = vcpu_e500->gtlb_offset[1]; | ||
599 | |||
600 | for (i = 0; i < size; i++) { | ||
601 | struct kvm_book3e_206_tlb_entry *tlbe = | ||
602 | &vcpu_e500->gtlb_arch[offset + i]; | ||
603 | |||
604 | if (!get_tlb_v(tlbe)) | ||
605 | continue; | ||
606 | |||
607 | eaddr = get_tlb_eaddr(tlbe); | ||
608 | vcpu_e500->tlb1_min_eaddr = | ||
609 | min(vcpu_e500->tlb1_min_eaddr, eaddr); | ||
610 | |||
611 | eaddr = get_tlb_end(tlbe); | ||
612 | vcpu_e500->tlb1_max_eaddr = | ||
613 | max(vcpu_e500->tlb1_max_eaddr, eaddr); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
618 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
780 | { | 619 | { |
620 | unsigned long start, end, size; | ||
621 | |||
622 | size = get_tlb_bytes(gtlbe); | ||
623 | start = get_tlb_eaddr(gtlbe) & ~(size - 1); | ||
624 | end = start + size - 1; | ||
625 | |||
626 | return vcpu_e500->tlb1_min_eaddr == start || | ||
627 | vcpu_e500->tlb1_max_eaddr == end; | ||
628 | } | ||
629 | |||
630 | /* This function is supposed to be called for adding a new valid tlb entry */ | ||
631 | static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu, | ||
632 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
633 | { | ||
634 | unsigned long start, end, size; | ||
781 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 635 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
782 | 636 | ||
783 | /* Recalc shadow pid since MSR changes */ | 637 | if (!get_tlb_v(gtlbe)) |
784 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | 638 | return; |
639 | |||
640 | size = get_tlb_bytes(gtlbe); | ||
641 | start = get_tlb_eaddr(gtlbe) & ~(size - 1); | ||
642 | end = start + size - 1; | ||
643 | |||
644 | vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start); | ||
645 | vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end); | ||
785 | } | 646 | } |
786 | 647 | ||
787 | static inline int kvmppc_e500_gtlbe_invalidate( | 648 | static inline int kvmppc_e500_gtlbe_invalidate( |
@@ -794,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate( | |||
794 | if (unlikely(get_tlb_iprot(gtlbe))) | 655 | if (unlikely(get_tlb_iprot(gtlbe))) |
795 | return -1; | 656 | return -1; |
796 | 657 | ||
658 | if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) | ||
659 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
660 | |||
797 | gtlbe->mas1 = 0; | 661 | gtlbe->mas1 = 0; |
798 | 662 | ||
799 | return 0; | 663 | return 0; |
@@ -811,7 +675,7 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) | |||
811 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); | 675 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); |
812 | 676 | ||
813 | /* Invalidate all vcpu id mappings */ | 677 | /* Invalidate all vcpu id mappings */ |
814 | kvmppc_e500_id_table_reset_all(vcpu_e500); | 678 | kvmppc_e500_tlbil_all(vcpu_e500); |
815 | 679 | ||
816 | return EMULATE_DONE; | 680 | return EMULATE_DONE; |
817 | } | 681 | } |
@@ -844,7 +708,59 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) | |||
844 | } | 708 | } |
845 | 709 | ||
846 | /* Invalidate all vcpu id mappings */ | 710 | /* Invalidate all vcpu id mappings */ |
847 | kvmppc_e500_id_table_reset_all(vcpu_e500); | 711 | kvmppc_e500_tlbil_all(vcpu_e500); |
712 | |||
713 | return EMULATE_DONE; | ||
714 | } | ||
715 | |||
716 | static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, | ||
717 | int pid, int rt) | ||
718 | { | ||
719 | struct kvm_book3e_206_tlb_entry *tlbe; | ||
720 | int tid, esel; | ||
721 | |||
722 | /* invalidate all entries */ | ||
723 | for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { | ||
724 | tlbe = get_entry(vcpu_e500, tlbsel, esel); | ||
725 | tid = get_tlb_tid(tlbe); | ||
726 | if (rt == 0 || tid == pid) { | ||
727 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); | ||
728 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); | ||
729 | } | ||
730 | } | ||
731 | } | ||
732 | |||
733 | static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, | ||
734 | int ra, int rb) | ||
735 | { | ||
736 | int tlbsel, esel; | ||
737 | gva_t ea; | ||
738 | |||
739 | ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb); | ||
740 | if (ra) | ||
741 | ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra); | ||
742 | |||
743 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | ||
744 | esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1); | ||
745 | if (esel >= 0) { | ||
746 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); | ||
747 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); | ||
748 | break; | ||
749 | } | ||
750 | } | ||
751 | } | ||
752 | |||
753 | int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb) | ||
754 | { | ||
755 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
756 | int pid = get_cur_spid(vcpu); | ||
757 | |||
758 | if (rt == 0 || rt == 1) { | ||
759 | tlbilx_all(vcpu_e500, 0, pid, rt); | ||
760 | tlbilx_all(vcpu_e500, 1, pid, rt); | ||
761 | } else if (rt == 3) { | ||
762 | tlbilx_one(vcpu_e500, pid, ra, rb); | ||
763 | } | ||
848 | 764 | ||
849 | return EMULATE_DONE; | 765 | return EMULATE_DONE; |
850 | } | 766 | } |
@@ -929,9 +845,7 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
929 | int stid; | 845 | int stid; |
930 | 846 | ||
931 | preempt_disable(); | 847 | preempt_disable(); |
932 | stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe), | 848 | stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); |
933 | get_tlb_tid(gtlbe), | ||
934 | get_cur_pr(&vcpu_e500->vcpu), 0); | ||
935 | 849 | ||
936 | stlbe->mas1 |= MAS1_TID(stid); | 850 | stlbe->mas1 |= MAS1_TID(stid); |
937 | write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); | 851 | write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); |
@@ -941,16 +855,21 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
941 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | 855 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) |
942 | { | 856 | { |
943 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 857 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
944 | struct kvm_book3e_206_tlb_entry *gtlbe; | 858 | struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; |
945 | int tlbsel, esel; | 859 | int tlbsel, esel, stlbsel, sesel; |
860 | int recal = 0; | ||
946 | 861 | ||
947 | tlbsel = get_tlb_tlbsel(vcpu); | 862 | tlbsel = get_tlb_tlbsel(vcpu); |
948 | esel = get_tlb_esel(vcpu, tlbsel); | 863 | esel = get_tlb_esel(vcpu, tlbsel); |
949 | 864 | ||
950 | gtlbe = get_entry(vcpu_e500, tlbsel, esel); | 865 | gtlbe = get_entry(vcpu_e500, tlbsel, esel); |
951 | 866 | ||
952 | if (get_tlb_v(gtlbe)) | 867 | if (get_tlb_v(gtlbe)) { |
953 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); | 868 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); |
869 | if ((tlbsel == 1) && | ||
870 | kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) | ||
871 | recal = 1; | ||
872 | } | ||
954 | 873 | ||
955 | gtlbe->mas1 = vcpu->arch.shared->mas1; | 874 | gtlbe->mas1 = vcpu->arch.shared->mas1; |
956 | gtlbe->mas2 = vcpu->arch.shared->mas2; | 875 | gtlbe->mas2 = vcpu->arch.shared->mas2; |
@@ -959,10 +878,20 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
959 | trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, | 878 | trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, |
960 | gtlbe->mas2, gtlbe->mas7_3); | 879 | gtlbe->mas2, gtlbe->mas7_3); |
961 | 880 | ||
881 | if (tlbsel == 1) { | ||
882 | /* | ||
883 | * If a valid tlb1 entry is overwritten then recalculate the | ||
884 | * min/max TLB1 map address range, otherwise there is no need | ||
885 | * to look in the tlb1 array. | ||
886 | */ | ||
887 | if (recal) | ||
888 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
889 | else | ||
890 | kvmppc_set_tlb1map_range(vcpu, gtlbe); | ||
891 | } | ||
892 | |||
962 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ | 893 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ |
963 | if (tlbe_is_host_safe(vcpu, gtlbe)) { | 894 | if (tlbe_is_host_safe(vcpu, gtlbe)) { |
964 | struct kvm_book3e_206_tlb_entry stlbe; | ||
965 | int stlbsel, sesel; | ||
966 | u64 eaddr; | 895 | u64 eaddr; |
967 | u64 raddr; | 896 | u64 raddr; |
968 | 897 | ||
@@ -989,7 +918,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
989 | * are mapped on the fly. */ | 918 | * are mapped on the fly. */ |
990 | stlbsel = 1; | 919 | stlbsel = 1; |
991 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, | 920 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, |
992 | raddr >> PAGE_SHIFT, gtlbe, &stlbe); | 921 | raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel); |
993 | break; | 922 | break; |
994 | 923 | ||
995 | default: | 924 | default: |
@@ -1003,6 +932,48 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
1003 | return EMULATE_DONE; | 932 | return EMULATE_DONE; |
1004 | } | 933 | } |
1005 | 934 | ||
935 | static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, | ||
936 | gva_t eaddr, unsigned int pid, int as) | ||
937 | { | ||
938 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
939 | int esel, tlbsel; | ||
940 | |||
941 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | ||
942 | esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); | ||
943 | if (esel >= 0) | ||
944 | return index_of(tlbsel, esel); | ||
945 | } | ||
946 | |||
947 | return -1; | ||
948 | } | ||
949 | |||
950 | /* 'linear_address' is actually an encoding of AS|PID|EADDR. */ | ||
951 | int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | ||
952 | struct kvm_translation *tr) | ||
953 | { | ||
954 | int index; | ||
955 | gva_t eaddr; | ||
956 | u8 pid; | ||
957 | u8 as; | ||
958 | |||
959 | eaddr = tr->linear_address; | ||
960 | pid = (tr->linear_address >> 32) & 0xff; | ||
961 | as = (tr->linear_address >> 40) & 0x1; | ||
962 | |||
963 | index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); | ||
964 | if (index < 0) { | ||
965 | tr->valid = 0; | ||
966 | return 0; | ||
967 | } | ||
968 | |||
969 | tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); | ||
970 | /* XXX what do "writeable" and "usermode" even mean? */ | ||
971 | tr->valid = 1; | ||
972 | |||
973 | return 0; | ||
974 | } | ||
975 | |||
976 | |||
1006 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | 977 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
1007 | { | 978 | { |
1008 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); | 979 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
@@ -1066,7 +1037,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
1066 | sesel = 0; /* unused */ | 1037 | sesel = 0; /* unused */ |
1067 | priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; | 1038 | priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; |
1068 | 1039 | ||
1069 | kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K, | 1040 | kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, |
1070 | &priv->ref, eaddr, &stlbe); | 1041 | &priv->ref, eaddr, &stlbe); |
1071 | break; | 1042 | break; |
1072 | 1043 | ||
@@ -1075,7 +1046,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
1075 | 1046 | ||
1076 | stlbsel = 1; | 1047 | stlbsel = 1; |
1077 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, | 1048 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, |
1078 | gtlbe, &stlbe); | 1049 | gtlbe, &stlbe, esel); |
1079 | break; | 1050 | break; |
1080 | } | 1051 | } |
1081 | 1052 | ||
@@ -1087,52 +1058,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
1087 | write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); | 1058 | write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); |
1088 | } | 1059 | } |
1089 | 1060 | ||
1090 | int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, | ||
1091 | gva_t eaddr, unsigned int pid, int as) | ||
1092 | { | ||
1093 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
1094 | int esel, tlbsel; | ||
1095 | |||
1096 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | ||
1097 | esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); | ||
1098 | if (esel >= 0) | ||
1099 | return index_of(tlbsel, esel); | ||
1100 | } | ||
1101 | |||
1102 | return -1; | ||
1103 | } | ||
1104 | |||
1105 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) | ||
1106 | { | ||
1107 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
1108 | |||
1109 | if (vcpu->arch.pid != pid) { | ||
1110 | vcpu_e500->pid[0] = vcpu->arch.pid = pid; | ||
1111 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
1112 | } | ||
1113 | } | ||
1114 | |||
1115 | void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
1116 | { | ||
1117 | struct kvm_book3e_206_tlb_entry *tlbe; | ||
1118 | |||
1119 | /* Insert large initial mapping for guest. */ | ||
1120 | tlbe = get_entry(vcpu_e500, 1, 0); | ||
1121 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); | ||
1122 | tlbe->mas2 = 0; | ||
1123 | tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK; | ||
1124 | |||
1125 | /* 4K map for serial output. Used by kernel wrapper. */ | ||
1126 | tlbe = get_entry(vcpu_e500, 1, 1); | ||
1127 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); | ||
1128 | tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; | ||
1129 | tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; | ||
1130 | } | ||
1131 | |||
1132 | static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) | 1061 | static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) |
1133 | { | 1062 | { |
1134 | int i; | 1063 | int i; |
1135 | 1064 | ||
1065 | clear_tlb1_bitmap(vcpu_e500); | ||
1066 | kfree(vcpu_e500->g2h_tlb1_map); | ||
1067 | |||
1136 | clear_tlb_refs(vcpu_e500); | 1068 | clear_tlb_refs(vcpu_e500); |
1137 | kfree(vcpu_e500->gtlb_priv[0]); | 1069 | kfree(vcpu_e500->gtlb_priv[0]); |
1138 | kfree(vcpu_e500->gtlb_priv[1]); | 1070 | kfree(vcpu_e500->gtlb_priv[1]); |
@@ -1155,6 +1087,36 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
1155 | vcpu_e500->gtlb_arch = NULL; | 1087 | vcpu_e500->gtlb_arch = NULL; |
1156 | } | 1088 | } |
1157 | 1089 | ||
1090 | void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
1091 | { | ||
1092 | sregs->u.e.mas0 = vcpu->arch.shared->mas0; | ||
1093 | sregs->u.e.mas1 = vcpu->arch.shared->mas1; | ||
1094 | sregs->u.e.mas2 = vcpu->arch.shared->mas2; | ||
1095 | sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; | ||
1096 | sregs->u.e.mas4 = vcpu->arch.shared->mas4; | ||
1097 | sregs->u.e.mas6 = vcpu->arch.shared->mas6; | ||
1098 | |||
1099 | sregs->u.e.mmucfg = vcpu->arch.mmucfg; | ||
1100 | sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0]; | ||
1101 | sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1]; | ||
1102 | sregs->u.e.tlbcfg[2] = 0; | ||
1103 | sregs->u.e.tlbcfg[3] = 0; | ||
1104 | } | ||
1105 | |||
1106 | int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
1107 | { | ||
1108 | if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { | ||
1109 | vcpu->arch.shared->mas0 = sregs->u.e.mas0; | ||
1110 | vcpu->arch.shared->mas1 = sregs->u.e.mas1; | ||
1111 | vcpu->arch.shared->mas2 = sregs->u.e.mas2; | ||
1112 | vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; | ||
1113 | vcpu->arch.shared->mas4 = sregs->u.e.mas4; | ||
1114 | vcpu->arch.shared->mas6 = sregs->u.e.mas6; | ||
1115 | } | ||
1116 | |||
1117 | return 0; | ||
1118 | } | ||
1119 | |||
1158 | int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | 1120 | int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, |
1159 | struct kvm_config_tlb *cfg) | 1121 | struct kvm_config_tlb *cfg) |
1160 | { | 1122 | { |
@@ -1163,6 +1125,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1163 | char *virt; | 1125 | char *virt; |
1164 | struct page **pages; | 1126 | struct page **pages; |
1165 | struct tlbe_priv *privs[2] = {}; | 1127 | struct tlbe_priv *privs[2] = {}; |
1128 | u64 *g2h_bitmap = NULL; | ||
1166 | size_t array_len; | 1129 | size_t array_len; |
1167 | u32 sets; | 1130 | u32 sets; |
1168 | int num_pages, ret, i; | 1131 | int num_pages, ret, i; |
@@ -1224,10 +1187,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1224 | if (!privs[0] || !privs[1]) | 1187 | if (!privs[0] || !privs[1]) |
1225 | goto err_put_page; | 1188 | goto err_put_page; |
1226 | 1189 | ||
1190 | g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1], | ||
1191 | GFP_KERNEL); | ||
1192 | if (!g2h_bitmap) | ||
1193 | goto err_put_page; | ||
1194 | |||
1227 | free_gtlb(vcpu_e500); | 1195 | free_gtlb(vcpu_e500); |
1228 | 1196 | ||
1229 | vcpu_e500->gtlb_priv[0] = privs[0]; | 1197 | vcpu_e500->gtlb_priv[0] = privs[0]; |
1230 | vcpu_e500->gtlb_priv[1] = privs[1]; | 1198 | vcpu_e500->gtlb_priv[1] = privs[1]; |
1199 | vcpu_e500->g2h_tlb1_map = g2h_bitmap; | ||
1231 | 1200 | ||
1232 | vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) | 1201 | vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) |
1233 | (virt + (cfg->array & (PAGE_SIZE - 1))); | 1202 | (virt + (cfg->array & (PAGE_SIZE - 1))); |
@@ -1238,14 +1207,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1238 | vcpu_e500->gtlb_offset[0] = 0; | 1207 | vcpu_e500->gtlb_offset[0] = 0; |
1239 | vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; | 1208 | vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; |
1240 | 1209 | ||
1241 | vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1210 | vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE; |
1211 | |||
1212 | vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | ||
1242 | if (params.tlb_sizes[0] <= 2048) | 1213 | if (params.tlb_sizes[0] <= 2048) |
1243 | vcpu_e500->tlb0cfg |= params.tlb_sizes[0]; | 1214 | vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0]; |
1244 | vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; | 1215 | vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; |
1245 | 1216 | ||
1246 | vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1217 | vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); |
1247 | vcpu_e500->tlb1cfg |= params.tlb_sizes[1]; | 1218 | vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1]; |
1248 | vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; | 1219 | vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; |
1249 | 1220 | ||
1250 | vcpu_e500->shared_tlb_pages = pages; | 1221 | vcpu_e500->shared_tlb_pages = pages; |
1251 | vcpu_e500->num_shared_tlb_pages = num_pages; | 1222 | vcpu_e500->num_shared_tlb_pages = num_pages; |
@@ -1256,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1256 | vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; | 1227 | vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; |
1257 | vcpu_e500->gtlb_params[1].sets = 1; | 1228 | vcpu_e500->gtlb_params[1].sets = 1; |
1258 | 1229 | ||
1230 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
1259 | return 0; | 1231 | return 0; |
1260 | 1232 | ||
1261 | err_put_page: | 1233 | err_put_page: |
@@ -1274,13 +1246,14 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, | |||
1274 | struct kvm_dirty_tlb *dirty) | 1246 | struct kvm_dirty_tlb *dirty) |
1275 | { | 1247 | { |
1276 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 1248 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
1277 | 1249 | kvmppc_recalc_tlb1map_range(vcpu_e500); | |
1278 | clear_tlb_refs(vcpu_e500); | 1250 | clear_tlb_refs(vcpu_e500); |
1279 | return 0; | 1251 | return 0; |
1280 | } | 1252 | } |
1281 | 1253 | ||
1282 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) | 1254 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) |
1283 | { | 1255 | { |
1256 | struct kvm_vcpu *vcpu = &vcpu_e500->vcpu; | ||
1284 | int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); | 1257 | int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); |
1285 | int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; | 1258 | int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; |
1286 | 1259 | ||
@@ -1357,22 +1330,32 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
1357 | if (!vcpu_e500->gtlb_priv[1]) | 1330 | if (!vcpu_e500->gtlb_priv[1]) |
1358 | goto err; | 1331 | goto err; |
1359 | 1332 | ||
1360 | if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) | 1333 | vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) * |
1334 | vcpu_e500->gtlb_params[1].entries, | ||
1335 | GFP_KERNEL); | ||
1336 | if (!vcpu_e500->g2h_tlb1_map) | ||
1337 | goto err; | ||
1338 | |||
1339 | vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * | ||
1340 | host_tlb_params[1].entries, | ||
1341 | GFP_KERNEL); | ||
1342 | if (!vcpu_e500->h2g_tlb1_rmap) | ||
1361 | goto err; | 1343 | goto err; |
1362 | 1344 | ||
1363 | /* Init TLB configuration register */ | 1345 | /* Init TLB configuration register */ |
1364 | vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & | 1346 | vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) & |
1365 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1347 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); |
1366 | vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries; | 1348 | vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries; |
1367 | vcpu_e500->tlb0cfg |= | 1349 | vcpu->arch.tlbcfg[0] |= |
1368 | vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; | 1350 | vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; |
1369 | 1351 | ||
1370 | vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & | 1352 | vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) & |
1371 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1353 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); |
1372 | vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries; | 1354 | vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries; |
1373 | vcpu_e500->tlb0cfg |= | 1355 | vcpu->arch.tlbcfg[1] |= |
1374 | vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; | 1356 | vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; |
1375 | 1357 | ||
1358 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
1376 | return 0; | 1359 | return 0; |
1377 | 1360 | ||
1378 | err: | 1361 | err: |
@@ -1385,8 +1368,7 @@ err: | |||
1385 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) | 1368 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) |
1386 | { | 1369 | { |
1387 | free_gtlb(vcpu_e500); | 1370 | free_gtlb(vcpu_e500); |
1388 | kvmppc_e500_id_table_free(vcpu_e500); | 1371 | kfree(vcpu_e500->h2g_tlb1_rmap); |
1389 | |||
1390 | kfree(vcpu_e500->tlb_refs[0]); | 1372 | kfree(vcpu_e500->tlb_refs[0]); |
1391 | kfree(vcpu_e500->tlb_refs[1]); | 1373 | kfree(vcpu_e500->tlb_refs[1]); |
1392 | } | 1374 | } |
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h deleted file mode 100644 index 5c6d2d7bf058..000000000000 --- a/arch/powerpc/kvm/e500_tlb.h +++ /dev/null | |||
@@ -1,174 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: Yu Liu, yu.liu@freescale.com | ||
5 | * | ||
6 | * Description: | ||
7 | * This file is based on arch/powerpc/kvm/44x_tlb.h, | ||
8 | * by Hollis Blanchard <hollisb@us.ibm.com>. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License, version 2, as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __KVM_E500_TLB_H__ | ||
16 | #define __KVM_E500_TLB_H__ | ||
17 | |||
18 | #include <linux/kvm_host.h> | ||
19 | #include <asm/mmu-book3e.h> | ||
20 | #include <asm/tlb.h> | ||
21 | #include <asm/kvm_e500.h> | ||
22 | |||
23 | /* This geometry is the legacy default -- can be overridden by userspace */ | ||
24 | #define KVM_E500_TLB0_WAY_SIZE 128 | ||
25 | #define KVM_E500_TLB0_WAY_NUM 2 | ||
26 | |||
27 | #define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM) | ||
28 | #define KVM_E500_TLB1_SIZE 16 | ||
29 | |||
30 | #define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF)) | ||
31 | #define tlbsel_of(index) ((index) >> 16) | ||
32 | #define esel_of(index) ((index) & 0xFFFF) | ||
33 | |||
34 | #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) | ||
35 | #define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) | ||
36 | #define MAS2_ATTRIB_MASK \ | ||
37 | (MAS2_X0 | MAS2_X1) | ||
38 | #define MAS3_ATTRIB_MASK \ | ||
39 | (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ | ||
40 | | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) | ||
41 | |||
42 | extern void kvmppc_dump_tlbs(struct kvm_vcpu *); | ||
43 | extern int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *, ulong); | ||
44 | extern int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *); | ||
45 | extern int kvmppc_e500_emul_tlbre(struct kvm_vcpu *); | ||
46 | extern int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *, int, int); | ||
47 | extern int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *, int); | ||
48 | extern int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int); | ||
49 | extern void kvmppc_e500_tlb_put(struct kvm_vcpu *); | ||
50 | extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int); | ||
51 | extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *); | ||
52 | extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *); | ||
53 | extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *); | ||
54 | extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *); | ||
55 | |||
56 | /* TLB helper functions */ | ||
57 | static inline unsigned int | ||
58 | get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
59 | { | ||
60 | return (tlbe->mas1 >> 7) & 0x1f; | ||
61 | } | ||
62 | |||
63 | static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
64 | { | ||
65 | return tlbe->mas2 & 0xfffff000; | ||
66 | } | ||
67 | |||
68 | static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
69 | { | ||
70 | unsigned int pgsize = get_tlb_size(tlbe); | ||
71 | return 1ULL << 10 << pgsize; | ||
72 | } | ||
73 | |||
74 | static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
75 | { | ||
76 | u64 bytes = get_tlb_bytes(tlbe); | ||
77 | return get_tlb_eaddr(tlbe) + bytes - 1; | ||
78 | } | ||
79 | |||
80 | static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
81 | { | ||
82 | return tlbe->mas7_3 & ~0xfffULL; | ||
83 | } | ||
84 | |||
85 | static inline unsigned int | ||
86 | get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
87 | { | ||
88 | return (tlbe->mas1 >> 16) & 0xff; | ||
89 | } | ||
90 | |||
91 | static inline unsigned int | ||
92 | get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
93 | { | ||
94 | return (tlbe->mas1 >> 12) & 0x1; | ||
95 | } | ||
96 | |||
97 | static inline unsigned int | ||
98 | get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
99 | { | ||
100 | return (tlbe->mas1 >> 31) & 0x1; | ||
101 | } | ||
102 | |||
103 | static inline unsigned int | ||
104 | get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
105 | { | ||
106 | return (tlbe->mas1 >> 30) & 0x1; | ||
107 | } | ||
108 | |||
109 | static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu) | ||
110 | { | ||
111 | return vcpu->arch.pid & 0xff; | ||
112 | } | ||
113 | |||
114 | static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu) | ||
115 | { | ||
116 | return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS)); | ||
117 | } | ||
118 | |||
119 | static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu) | ||
120 | { | ||
121 | return !!(vcpu->arch.shared->msr & MSR_PR); | ||
122 | } | ||
123 | |||
124 | static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu) | ||
125 | { | ||
126 | return (vcpu->arch.shared->mas6 >> 16) & 0xff; | ||
127 | } | ||
128 | |||
129 | static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu) | ||
130 | { | ||
131 | return vcpu->arch.shared->mas6 & 0x1; | ||
132 | } | ||
133 | |||
134 | static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu) | ||
135 | { | ||
136 | /* | ||
137 | * Manual says that tlbsel is 2 bits wide. | ||
138 | * Since we only have two TLBs, only lower bit is used. | ||
139 | */ | ||
140 | return (vcpu->arch.shared->mas0 >> 28) & 0x1; | ||
141 | } | ||
142 | |||
143 | static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu) | ||
144 | { | ||
145 | return vcpu->arch.shared->mas0 & 0xfff; | ||
146 | } | ||
147 | |||
148 | static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu) | ||
149 | { | ||
150 | return (vcpu->arch.shared->mas0 >> 16) & 0xfff; | ||
151 | } | ||
152 | |||
153 | static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | ||
154 | const struct kvm_book3e_206_tlb_entry *tlbe) | ||
155 | { | ||
156 | gpa_t gpa; | ||
157 | |||
158 | if (!get_tlb_v(tlbe)) | ||
159 | return 0; | ||
160 | |||
161 | /* Does it match current guest AS? */ | ||
162 | /* XXX what about IS != DS? */ | ||
163 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) | ||
164 | return 0; | ||
165 | |||
166 | gpa = get_tlb_raddr(tlbe); | ||
167 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) | ||
168 | /* Mapping is not for RAM. */ | ||
169 | return 0; | ||
170 | |||
171 | return 1; | ||
172 | } | ||
173 | |||
174 | #endif /* __KVM_E500_TLB_H__ */ | ||
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c new file mode 100644 index 000000000000..fe6c1de6b701 --- /dev/null +++ b/arch/powerpc/kvm/e500mc.c | |||
@@ -0,0 +1,342 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: Varun Sethi, <varun.sethi@freescale.com> | ||
5 | * | ||
6 | * Description: | ||
7 | * This file is derived from arch/powerpc/kvm/e500.c, | ||
8 | * by Yu Liu <yu.liu@freescale.com>. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License, version 2, as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kvm_host.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/export.h> | ||
19 | |||
20 | #include <asm/reg.h> | ||
21 | #include <asm/cputable.h> | ||
22 | #include <asm/tlbflush.h> | ||
23 | #include <asm/kvm_ppc.h> | ||
24 | #include <asm/dbell.h> | ||
25 | |||
26 | #include "booke.h" | ||
27 | #include "e500.h" | ||
28 | |||
29 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type) | ||
30 | { | ||
31 | enum ppc_dbell dbell_type; | ||
32 | unsigned long tag; | ||
33 | |||
34 | switch (type) { | ||
35 | case INT_CLASS_NONCRIT: | ||
36 | dbell_type = PPC_G_DBELL; | ||
37 | break; | ||
38 | case INT_CLASS_CRIT: | ||
39 | dbell_type = PPC_G_DBELL_CRIT; | ||
40 | break; | ||
41 | case INT_CLASS_MC: | ||
42 | dbell_type = PPC_G_DBELL_MC; | ||
43 | break; | ||
44 | default: | ||
45 | WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type); | ||
46 | return; | ||
47 | } | ||
48 | |||
49 | |||
50 | tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id; | ||
51 | mb(); | ||
52 | ppc_msgsnd(dbell_type, 0, tag); | ||
53 | } | ||
54 | |||
55 | /* gtlbe must not be mapped by more than one host tlb entry */ | ||
56 | void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
57 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
58 | { | ||
59 | unsigned int tid, ts; | ||
60 | u32 val, eaddr, lpid; | ||
61 | unsigned long flags; | ||
62 | |||
63 | ts = get_tlb_ts(gtlbe); | ||
64 | tid = get_tlb_tid(gtlbe); | ||
65 | lpid = vcpu_e500->vcpu.kvm->arch.lpid; | ||
66 | |||
67 | /* We search the host TLB to invalidate its shadow TLB entry */ | ||
68 | val = (tid << 16) | ts; | ||
69 | eaddr = get_tlb_eaddr(gtlbe); | ||
70 | |||
71 | local_irq_save(flags); | ||
72 | |||
73 | mtspr(SPRN_MAS6, val); | ||
74 | mtspr(SPRN_MAS5, MAS5_SGS | lpid); | ||
75 | |||
76 | asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr)); | ||
77 | val = mfspr(SPRN_MAS1); | ||
78 | if (val & MAS1_VALID) { | ||
79 | mtspr(SPRN_MAS1, val & ~MAS1_VALID); | ||
80 | asm volatile("tlbwe"); | ||
81 | } | ||
82 | mtspr(SPRN_MAS5, 0); | ||
83 | /* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */ | ||
84 | mtspr(SPRN_MAS8, 0); | ||
85 | isync(); | ||
86 | |||
87 | local_irq_restore(flags); | ||
88 | } | ||
89 | |||
90 | void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
91 | { | ||
92 | unsigned long flags; | ||
93 | |||
94 | local_irq_save(flags); | ||
95 | mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid); | ||
96 | asm volatile("tlbilxlpid"); | ||
97 | mtspr(SPRN_MAS5, 0); | ||
98 | local_irq_restore(flags); | ||
99 | } | ||
100 | |||
101 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) | ||
102 | { | ||
103 | vcpu->arch.pid = pid; | ||
104 | } | ||
105 | |||
106 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | ||
107 | { | ||
108 | } | ||
109 | |||
110 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
111 | { | ||
112 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
113 | |||
114 | kvmppc_booke_vcpu_load(vcpu, cpu); | ||
115 | |||
116 | mtspr(SPRN_LPID, vcpu->kvm->arch.lpid); | ||
117 | mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); | ||
118 | mtspr(SPRN_GPIR, vcpu->vcpu_id); | ||
119 | mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp); | ||
120 | mtspr(SPRN_EPLC, vcpu->arch.eplc); | ||
121 | mtspr(SPRN_EPSC, vcpu->arch.epsc); | ||
122 | |||
123 | mtspr(SPRN_GIVPR, vcpu->arch.ivpr); | ||
124 | mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); | ||
125 | mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); | ||
126 | mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0); | ||
127 | mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1); | ||
128 | mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2); | ||
129 | mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3); | ||
130 | |||
131 | mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0); | ||
132 | mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1); | ||
133 | |||
134 | mtspr(SPRN_GEPR, vcpu->arch.epr); | ||
135 | mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); | ||
136 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); | ||
137 | |||
138 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) | ||
139 | kvmppc_e500_tlbil_all(vcpu_e500); | ||
140 | |||
141 | kvmppc_load_guest_fp(vcpu); | ||
142 | } | ||
143 | |||
144 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | ||
145 | { | ||
146 | vcpu->arch.eplc = mfspr(SPRN_EPLC); | ||
147 | vcpu->arch.epsc = mfspr(SPRN_EPSC); | ||
148 | |||
149 | vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0); | ||
150 | vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1); | ||
151 | vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2); | ||
152 | vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3); | ||
153 | |||
154 | vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0); | ||
155 | vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1); | ||
156 | |||
157 | vcpu->arch.epr = mfspr(SPRN_GEPR); | ||
158 | vcpu->arch.shared->dar = mfspr(SPRN_GDEAR); | ||
159 | vcpu->arch.shared->esr = mfspr(SPRN_GESR); | ||
160 | |||
161 | vcpu->arch.oldpir = mfspr(SPRN_PIR); | ||
162 | |||
163 | kvmppc_booke_vcpu_put(vcpu); | ||
164 | } | ||
165 | |||
166 | int kvmppc_core_check_processor_compat(void) | ||
167 | { | ||
168 | int r; | ||
169 | |||
170 | if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0) | ||
171 | r = 0; | ||
172 | else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) | ||
173 | r = 0; | ||
174 | else | ||
175 | r = -ENOTSUPP; | ||
176 | |||
177 | return r; | ||
178 | } | ||
179 | |||
180 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | ||
181 | { | ||
182 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
183 | |||
184 | vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \ | ||
185 | SPRN_EPCR_DUVD; | ||
186 | vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP; | ||
187 | vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT); | ||
188 | vcpu->arch.epsc = vcpu->arch.eplc; | ||
189 | |||
190 | vcpu->arch.pvr = mfspr(SPRN_PVR); | ||
191 | vcpu_e500->svr = mfspr(SPRN_SVR); | ||
192 | |||
193 | vcpu->arch.cpu_type = KVM_CPU_E500MC; | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
199 | { | ||
200 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
201 | |||
202 | sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM | | ||
203 | KVM_SREGS_E_PC; | ||
204 | sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL; | ||
205 | |||
206 | sregs->u.e.impl.fsl.features = 0; | ||
207 | sregs->u.e.impl.fsl.svr = vcpu_e500->svr; | ||
208 | sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; | ||
209 | sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; | ||
210 | |||
211 | kvmppc_get_sregs_e500_tlb(vcpu, sregs); | ||
212 | |||
213 | sregs->u.e.ivor_high[3] = | ||
214 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; | ||
215 | sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; | ||
216 | sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; | ||
217 | |||
218 | kvmppc_get_sregs_ivor(vcpu, sregs); | ||
219 | } | ||
220 | |||
221 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
222 | { | ||
223 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
224 | int ret; | ||
225 | |||
226 | if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { | ||
227 | vcpu_e500->svr = sregs->u.e.impl.fsl.svr; | ||
228 | vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0; | ||
229 | vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; | ||
230 | } | ||
231 | |||
232 | ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs); | ||
233 | if (ret < 0) | ||
234 | return ret; | ||
235 | |||
236 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | ||
237 | return 0; | ||
238 | |||
239 | if (sregs->u.e.features & KVM_SREGS_E_PM) { | ||
240 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = | ||
241 | sregs->u.e.ivor_high[3]; | ||
242 | } | ||
243 | |||
244 | if (sregs->u.e.features & KVM_SREGS_E_PC) { | ||
245 | vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = | ||
246 | sregs->u.e.ivor_high[4]; | ||
247 | vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = | ||
248 | sregs->u.e.ivor_high[5]; | ||
249 | } | ||
250 | |||
251 | return kvmppc_set_sregs_ivor(vcpu, sregs); | ||
252 | } | ||
253 | |||
254 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | ||
255 | { | ||
256 | struct kvmppc_vcpu_e500 *vcpu_e500; | ||
257 | struct kvm_vcpu *vcpu; | ||
258 | int err; | ||
259 | |||
260 | vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
261 | if (!vcpu_e500) { | ||
262 | err = -ENOMEM; | ||
263 | goto out; | ||
264 | } | ||
265 | vcpu = &vcpu_e500->vcpu; | ||
266 | |||
267 | /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */ | ||
268 | vcpu->arch.oldpir = 0xffffffff; | ||
269 | |||
270 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
271 | if (err) | ||
272 | goto free_vcpu; | ||
273 | |||
274 | err = kvmppc_e500_tlb_init(vcpu_e500); | ||
275 | if (err) | ||
276 | goto uninit_vcpu; | ||
277 | |||
278 | vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); | ||
279 | if (!vcpu->arch.shared) | ||
280 | goto uninit_tlb; | ||
281 | |||
282 | return vcpu; | ||
283 | |||
284 | uninit_tlb: | ||
285 | kvmppc_e500_tlb_uninit(vcpu_e500); | ||
286 | uninit_vcpu: | ||
287 | kvm_vcpu_uninit(vcpu); | ||
288 | |||
289 | free_vcpu: | ||
290 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | ||
291 | out: | ||
292 | return ERR_PTR(err); | ||
293 | } | ||
294 | |||
295 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | ||
296 | { | ||
297 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
298 | |||
299 | free_page((unsigned long)vcpu->arch.shared); | ||
300 | kvmppc_e500_tlb_uninit(vcpu_e500); | ||
301 | kvm_vcpu_uninit(vcpu); | ||
302 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | ||
303 | } | ||
304 | |||
305 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
306 | { | ||
307 | int lpid; | ||
308 | |||
309 | lpid = kvmppc_alloc_lpid(); | ||
310 | if (lpid < 0) | ||
311 | return lpid; | ||
312 | |||
313 | kvm->arch.lpid = lpid; | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
318 | { | ||
319 | kvmppc_free_lpid(kvm->arch.lpid); | ||
320 | } | ||
321 | |||
322 | static int __init kvmppc_e500mc_init(void) | ||
323 | { | ||
324 | int r; | ||
325 | |||
326 | r = kvmppc_booke_init(); | ||
327 | if (r) | ||
328 | return r; | ||
329 | |||
330 | kvmppc_init_lpid(64); | ||
331 | kvmppc_claim_lpid(0); /* host */ | ||
332 | |||
333 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); | ||
334 | } | ||
335 | |||
336 | static void __exit kvmppc_e500mc_exit(void) | ||
337 | { | ||
338 | kvmppc_booke_exit(); | ||
339 | } | ||
340 | |||
341 | module_init(kvmppc_e500mc_init); | ||
342 | module_exit(kvmppc_e500mc_exit); | ||
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 968f40101883..f90e86dea7a2 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/string.h> | 24 | #include <linux/string.h> |
25 | #include <linux/kvm_host.h> | 25 | #include <linux/kvm_host.h> |
26 | #include <linux/clockchips.h> | ||
26 | 27 | ||
27 | #include <asm/reg.h> | 28 | #include <asm/reg.h> |
28 | #include <asm/time.h> | 29 | #include <asm/time.h> |
@@ -35,7 +36,9 @@ | |||
35 | #define OP_TRAP 3 | 36 | #define OP_TRAP 3 |
36 | #define OP_TRAP_64 2 | 37 | #define OP_TRAP_64 2 |
37 | 38 | ||
39 | #define OP_31_XOP_TRAP 4 | ||
38 | #define OP_31_XOP_LWZX 23 | 40 | #define OP_31_XOP_LWZX 23 |
41 | #define OP_31_XOP_TRAP_64 68 | ||
39 | #define OP_31_XOP_LBZX 87 | 42 | #define OP_31_XOP_LBZX 87 |
40 | #define OP_31_XOP_STWX 151 | 43 | #define OP_31_XOP_STWX 151 |
41 | #define OP_31_XOP_STBX 215 | 44 | #define OP_31_XOP_STBX 215 |
@@ -102,8 +105,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | |||
102 | */ | 105 | */ |
103 | 106 | ||
104 | dec_time = vcpu->arch.dec; | 107 | dec_time = vcpu->arch.dec; |
105 | dec_time *= 1000; | 108 | /* |
106 | do_div(dec_time, tb_ticks_per_usec); | 109 | * Guest timebase ticks at the same frequency as host decrementer. |
110 | * So use the host decrementer calculations for decrementer emulation. | ||
111 | */ | ||
112 | dec_time = dec_time << decrementer_clockevent.shift; | ||
113 | do_div(dec_time, decrementer_clockevent.mult); | ||
107 | dec_nsec = do_div(dec_time, NSEC_PER_SEC); | 114 | dec_nsec = do_div(dec_time, NSEC_PER_SEC); |
108 | hrtimer_start(&vcpu->arch.dec_timer, | 115 | hrtimer_start(&vcpu->arch.dec_timer, |
109 | ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); | 116 | ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); |
@@ -141,14 +148,13 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) | |||
141 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | 148 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
142 | { | 149 | { |
143 | u32 inst = kvmppc_get_last_inst(vcpu); | 150 | u32 inst = kvmppc_get_last_inst(vcpu); |
144 | u32 ea; | 151 | int ra = get_ra(inst); |
145 | int ra; | 152 | int rs = get_rs(inst); |
146 | int rb; | 153 | int rt = get_rt(inst); |
147 | int rs; | 154 | int sprn = get_sprn(inst); |
148 | int rt; | ||
149 | int sprn; | ||
150 | enum emulation_result emulated = EMULATE_DONE; | 155 | enum emulation_result emulated = EMULATE_DONE; |
151 | int advance = 1; | 156 | int advance = 1; |
157 | ulong spr_val = 0; | ||
152 | 158 | ||
153 | /* this default type might be overwritten by subcategories */ | 159 | /* this default type might be overwritten by subcategories */ |
154 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 160 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
@@ -170,173 +176,143 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
170 | case 31: | 176 | case 31: |
171 | switch (get_xop(inst)) { | 177 | switch (get_xop(inst)) { |
172 | 178 | ||
179 | case OP_31_XOP_TRAP: | ||
180 | #ifdef CONFIG_64BIT | ||
181 | case OP_31_XOP_TRAP_64: | ||
182 | #endif | ||
183 | #ifdef CONFIG_PPC_BOOK3S | ||
184 | kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); | ||
185 | #else | ||
186 | kvmppc_core_queue_program(vcpu, | ||
187 | vcpu->arch.shared->esr | ESR_PTR); | ||
188 | #endif | ||
189 | advance = 0; | ||
190 | break; | ||
173 | case OP_31_XOP_LWZX: | 191 | case OP_31_XOP_LWZX: |
174 | rt = get_rt(inst); | ||
175 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 192 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
176 | break; | 193 | break; |
177 | 194 | ||
178 | case OP_31_XOP_LBZX: | 195 | case OP_31_XOP_LBZX: |
179 | rt = get_rt(inst); | ||
180 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 196 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
181 | break; | 197 | break; |
182 | 198 | ||
183 | case OP_31_XOP_LBZUX: | 199 | case OP_31_XOP_LBZUX: |
184 | rt = get_rt(inst); | ||
185 | ra = get_ra(inst); | ||
186 | rb = get_rb(inst); | ||
187 | |||
188 | ea = kvmppc_get_gpr(vcpu, rb); | ||
189 | if (ra) | ||
190 | ea += kvmppc_get_gpr(vcpu, ra); | ||
191 | |||
192 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 200 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
193 | kvmppc_set_gpr(vcpu, ra, ea); | 201 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
194 | break; | 202 | break; |
195 | 203 | ||
196 | case OP_31_XOP_STWX: | 204 | case OP_31_XOP_STWX: |
197 | rs = get_rs(inst); | ||
198 | emulated = kvmppc_handle_store(run, vcpu, | 205 | emulated = kvmppc_handle_store(run, vcpu, |
199 | kvmppc_get_gpr(vcpu, rs), | 206 | kvmppc_get_gpr(vcpu, rs), |
200 | 4, 1); | 207 | 4, 1); |
201 | break; | 208 | break; |
202 | 209 | ||
203 | case OP_31_XOP_STBX: | 210 | case OP_31_XOP_STBX: |
204 | rs = get_rs(inst); | ||
205 | emulated = kvmppc_handle_store(run, vcpu, | 211 | emulated = kvmppc_handle_store(run, vcpu, |
206 | kvmppc_get_gpr(vcpu, rs), | 212 | kvmppc_get_gpr(vcpu, rs), |
207 | 1, 1); | 213 | 1, 1); |
208 | break; | 214 | break; |
209 | 215 | ||
210 | case OP_31_XOP_STBUX: | 216 | case OP_31_XOP_STBUX: |
211 | rs = get_rs(inst); | ||
212 | ra = get_ra(inst); | ||
213 | rb = get_rb(inst); | ||
214 | |||
215 | ea = kvmppc_get_gpr(vcpu, rb); | ||
216 | if (ra) | ||
217 | ea += kvmppc_get_gpr(vcpu, ra); | ||
218 | |||
219 | emulated = kvmppc_handle_store(run, vcpu, | 217 | emulated = kvmppc_handle_store(run, vcpu, |
220 | kvmppc_get_gpr(vcpu, rs), | 218 | kvmppc_get_gpr(vcpu, rs), |
221 | 1, 1); | 219 | 1, 1); |
222 | kvmppc_set_gpr(vcpu, rs, ea); | 220 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
223 | break; | 221 | break; |
224 | 222 | ||
225 | case OP_31_XOP_LHAX: | 223 | case OP_31_XOP_LHAX: |
226 | rt = get_rt(inst); | ||
227 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 224 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
228 | break; | 225 | break; |
229 | 226 | ||
230 | case OP_31_XOP_LHZX: | 227 | case OP_31_XOP_LHZX: |
231 | rt = get_rt(inst); | ||
232 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 228 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
233 | break; | 229 | break; |
234 | 230 | ||
235 | case OP_31_XOP_LHZUX: | 231 | case OP_31_XOP_LHZUX: |
236 | rt = get_rt(inst); | ||
237 | ra = get_ra(inst); | ||
238 | rb = get_rb(inst); | ||
239 | |||
240 | ea = kvmppc_get_gpr(vcpu, rb); | ||
241 | if (ra) | ||
242 | ea += kvmppc_get_gpr(vcpu, ra); | ||
243 | |||
244 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 232 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
245 | kvmppc_set_gpr(vcpu, ra, ea); | 233 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
246 | break; | 234 | break; |
247 | 235 | ||
248 | case OP_31_XOP_MFSPR: | 236 | case OP_31_XOP_MFSPR: |
249 | sprn = get_sprn(inst); | ||
250 | rt = get_rt(inst); | ||
251 | |||
252 | switch (sprn) { | 237 | switch (sprn) { |
253 | case SPRN_SRR0: | 238 | case SPRN_SRR0: |
254 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); | 239 | spr_val = vcpu->arch.shared->srr0; |
255 | break; | 240 | break; |
256 | case SPRN_SRR1: | 241 | case SPRN_SRR1: |
257 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1); | 242 | spr_val = vcpu->arch.shared->srr1; |
258 | break; | 243 | break; |
259 | case SPRN_PVR: | 244 | case SPRN_PVR: |
260 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; | 245 | spr_val = vcpu->arch.pvr; |
246 | break; | ||
261 | case SPRN_PIR: | 247 | case SPRN_PIR: |
262 | kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; | 248 | spr_val = vcpu->vcpu_id; |
249 | break; | ||
263 | case SPRN_MSSSR0: | 250 | case SPRN_MSSSR0: |
264 | kvmppc_set_gpr(vcpu, rt, 0); break; | 251 | spr_val = 0; |
252 | break; | ||
265 | 253 | ||
266 | /* Note: mftb and TBRL/TBWL are user-accessible, so | 254 | /* Note: mftb and TBRL/TBWL are user-accessible, so |
267 | * the guest can always access the real TB anyways. | 255 | * the guest can always access the real TB anyways. |
268 | * In fact, we probably will never see these traps. */ | 256 | * In fact, we probably will never see these traps. */ |
269 | case SPRN_TBWL: | 257 | case SPRN_TBWL: |
270 | kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; | 258 | spr_val = get_tb() >> 32; |
259 | break; | ||
271 | case SPRN_TBWU: | 260 | case SPRN_TBWU: |
272 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; | 261 | spr_val = get_tb(); |
262 | break; | ||
273 | 263 | ||
274 | case SPRN_SPRG0: | 264 | case SPRN_SPRG0: |
275 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0); | 265 | spr_val = vcpu->arch.shared->sprg0; |
276 | break; | 266 | break; |
277 | case SPRN_SPRG1: | 267 | case SPRN_SPRG1: |
278 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1); | 268 | spr_val = vcpu->arch.shared->sprg1; |
279 | break; | 269 | break; |
280 | case SPRN_SPRG2: | 270 | case SPRN_SPRG2: |
281 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2); | 271 | spr_val = vcpu->arch.shared->sprg2; |
282 | break; | 272 | break; |
283 | case SPRN_SPRG3: | 273 | case SPRN_SPRG3: |
284 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3); | 274 | spr_val = vcpu->arch.shared->sprg3; |
285 | break; | 275 | break; |
286 | /* Note: SPRG4-7 are user-readable, so we don't get | 276 | /* Note: SPRG4-7 are user-readable, so we don't get |
287 | * a trap. */ | 277 | * a trap. */ |
288 | 278 | ||
289 | case SPRN_DEC: | 279 | case SPRN_DEC: |
290 | { | 280 | spr_val = kvmppc_get_dec(vcpu, get_tb()); |
291 | kvmppc_set_gpr(vcpu, rt, | ||
292 | kvmppc_get_dec(vcpu, get_tb())); | ||
293 | break; | 281 | break; |
294 | } | ||
295 | default: | 282 | default: |
296 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); | 283 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, |
297 | if (emulated == EMULATE_FAIL) { | 284 | &spr_val); |
298 | printk("mfspr: unknown spr %x\n", sprn); | 285 | if (unlikely(emulated == EMULATE_FAIL)) { |
299 | kvmppc_set_gpr(vcpu, rt, 0); | 286 | printk(KERN_INFO "mfspr: unknown spr " |
287 | "0x%x\n", sprn); | ||
300 | } | 288 | } |
301 | break; | 289 | break; |
302 | } | 290 | } |
291 | kvmppc_set_gpr(vcpu, rt, spr_val); | ||
303 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); | 292 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); |
304 | break; | 293 | break; |
305 | 294 | ||
306 | case OP_31_XOP_STHX: | 295 | case OP_31_XOP_STHX: |
307 | rs = get_rs(inst); | ||
308 | ra = get_ra(inst); | ||
309 | rb = get_rb(inst); | ||
310 | |||
311 | emulated = kvmppc_handle_store(run, vcpu, | 296 | emulated = kvmppc_handle_store(run, vcpu, |
312 | kvmppc_get_gpr(vcpu, rs), | 297 | kvmppc_get_gpr(vcpu, rs), |
313 | 2, 1); | 298 | 2, 1); |
314 | break; | 299 | break; |
315 | 300 | ||
316 | case OP_31_XOP_STHUX: | 301 | case OP_31_XOP_STHUX: |
317 | rs = get_rs(inst); | ||
318 | ra = get_ra(inst); | ||
319 | rb = get_rb(inst); | ||
320 | |||
321 | ea = kvmppc_get_gpr(vcpu, rb); | ||
322 | if (ra) | ||
323 | ea += kvmppc_get_gpr(vcpu, ra); | ||
324 | |||
325 | emulated = kvmppc_handle_store(run, vcpu, | 302 | emulated = kvmppc_handle_store(run, vcpu, |
326 | kvmppc_get_gpr(vcpu, rs), | 303 | kvmppc_get_gpr(vcpu, rs), |
327 | 2, 1); | 304 | 2, 1); |
328 | kvmppc_set_gpr(vcpu, ra, ea); | 305 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
329 | break; | 306 | break; |
330 | 307 | ||
331 | case OP_31_XOP_MTSPR: | 308 | case OP_31_XOP_MTSPR: |
332 | sprn = get_sprn(inst); | 309 | spr_val = kvmppc_get_gpr(vcpu, rs); |
333 | rs = get_rs(inst); | ||
334 | switch (sprn) { | 310 | switch (sprn) { |
335 | case SPRN_SRR0: | 311 | case SPRN_SRR0: |
336 | vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); | 312 | vcpu->arch.shared->srr0 = spr_val; |
337 | break; | 313 | break; |
338 | case SPRN_SRR1: | 314 | case SPRN_SRR1: |
339 | vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs); | 315 | vcpu->arch.shared->srr1 = spr_val; |
340 | break; | 316 | break; |
341 | 317 | ||
342 | /* XXX We need to context-switch the timebase for | 318 | /* XXX We need to context-switch the timebase for |
@@ -347,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
347 | case SPRN_MSSSR0: break; | 323 | case SPRN_MSSSR0: break; |
348 | 324 | ||
349 | case SPRN_DEC: | 325 | case SPRN_DEC: |
350 | vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); | 326 | vcpu->arch.dec = spr_val; |
351 | kvmppc_emulate_dec(vcpu); | 327 | kvmppc_emulate_dec(vcpu); |
352 | break; | 328 | break; |
353 | 329 | ||
354 | case SPRN_SPRG0: | 330 | case SPRN_SPRG0: |
355 | vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs); | 331 | vcpu->arch.shared->sprg0 = spr_val; |
356 | break; | 332 | break; |
357 | case SPRN_SPRG1: | 333 | case SPRN_SPRG1: |
358 | vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs); | 334 | vcpu->arch.shared->sprg1 = spr_val; |
359 | break; | 335 | break; |
360 | case SPRN_SPRG2: | 336 | case SPRN_SPRG2: |
361 | vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs); | 337 | vcpu->arch.shared->sprg2 = spr_val; |
362 | break; | 338 | break; |
363 | case SPRN_SPRG3: | 339 | case SPRN_SPRG3: |
364 | vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs); | 340 | vcpu->arch.shared->sprg3 = spr_val; |
365 | break; | 341 | break; |
366 | 342 | ||
367 | default: | 343 | default: |
368 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); | 344 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, |
345 | spr_val); | ||
369 | if (emulated == EMULATE_FAIL) | 346 | if (emulated == EMULATE_FAIL) |
370 | printk("mtspr: unknown spr %x\n", sprn); | 347 | printk(KERN_INFO "mtspr: unknown spr " |
348 | "0x%x\n", sprn); | ||
371 | break; | 349 | break; |
372 | } | 350 | } |
373 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); | 351 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); |
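The mfspr/mtspr rework in the hunks above moves the GPR access out of the per-SPR switch: each case only fills or consumes the local spr_val, and the register file is touched exactly once after the switch. A trimmed sketch of the resulting mfspr shape, assuming a made-up demo_emulate_mfspr() with only two SPRs (the real code additionally defers unknown SPRs to kvmppc_core_emulate_mfspr()):

static int demo_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	ulong spr_val = 0;
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SRR0:
		spr_val = vcpu->arch.shared->srr0;
		break;
	case SPRN_PVR:
		spr_val = vcpu->arch.pvr;
		break;
	default:
		emulated = EMULATE_FAIL;
		break;
	}

	/* single write-back point instead of one per case arm */
	kvmppc_set_gpr(vcpu, rt, spr_val);
	return emulated;
}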
@@ -382,7 +360,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
382 | break; | 360 | break; |
383 | 361 | ||
384 | case OP_31_XOP_LWBRX: | 362 | case OP_31_XOP_LWBRX: |
385 | rt = get_rt(inst); | ||
386 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | 363 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); |
387 | break; | 364 | break; |
388 | 365 | ||
@@ -390,25 +367,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
390 | break; | 367 | break; |
391 | 368 | ||
392 | case OP_31_XOP_STWBRX: | 369 | case OP_31_XOP_STWBRX: |
393 | rs = get_rs(inst); | ||
394 | ra = get_ra(inst); | ||
395 | rb = get_rb(inst); | ||
396 | |||
397 | emulated = kvmppc_handle_store(run, vcpu, | 370 | emulated = kvmppc_handle_store(run, vcpu, |
398 | kvmppc_get_gpr(vcpu, rs), | 371 | kvmppc_get_gpr(vcpu, rs), |
399 | 4, 0); | 372 | 4, 0); |
400 | break; | 373 | break; |
401 | 374 | ||
402 | case OP_31_XOP_LHBRX: | 375 | case OP_31_XOP_LHBRX: |
403 | rt = get_rt(inst); | ||
404 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | 376 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); |
405 | break; | 377 | break; |
406 | 378 | ||
407 | case OP_31_XOP_STHBRX: | 379 | case OP_31_XOP_STHBRX: |
408 | rs = get_rs(inst); | ||
409 | ra = get_ra(inst); | ||
410 | rb = get_rb(inst); | ||
411 | |||
412 | emulated = kvmppc_handle_store(run, vcpu, | 380 | emulated = kvmppc_handle_store(run, vcpu, |
413 | kvmppc_get_gpr(vcpu, rs), | 381 | kvmppc_get_gpr(vcpu, rs), |
414 | 2, 0); | 382 | 2, 0); |
@@ -421,99 +389,78 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
421 | break; | 389 | break; |
422 | 390 | ||
423 | case OP_LWZ: | 391 | case OP_LWZ: |
424 | rt = get_rt(inst); | ||
425 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 392 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
426 | break; | 393 | break; |
427 | 394 | ||
428 | case OP_LWZU: | 395 | case OP_LWZU: |
429 | ra = get_ra(inst); | ||
430 | rt = get_rt(inst); | ||
431 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 396 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
432 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 397 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
433 | break; | 398 | break; |
434 | 399 | ||
435 | case OP_LBZ: | 400 | case OP_LBZ: |
436 | rt = get_rt(inst); | ||
437 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 401 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
438 | break; | 402 | break; |
439 | 403 | ||
440 | case OP_LBZU: | 404 | case OP_LBZU: |
441 | ra = get_ra(inst); | ||
442 | rt = get_rt(inst); | ||
443 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 405 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
444 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 406 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
445 | break; | 407 | break; |
446 | 408 | ||
447 | case OP_STW: | 409 | case OP_STW: |
448 | rs = get_rs(inst); | ||
449 | emulated = kvmppc_handle_store(run, vcpu, | 410 | emulated = kvmppc_handle_store(run, vcpu, |
450 | kvmppc_get_gpr(vcpu, rs), | 411 | kvmppc_get_gpr(vcpu, rs), |
451 | 4, 1); | 412 | 4, 1); |
452 | break; | 413 | break; |
453 | 414 | ||
454 | case OP_STWU: | 415 | case OP_STWU: |
455 | ra = get_ra(inst); | ||
456 | rs = get_rs(inst); | ||
457 | emulated = kvmppc_handle_store(run, vcpu, | 416 | emulated = kvmppc_handle_store(run, vcpu, |
458 | kvmppc_get_gpr(vcpu, rs), | 417 | kvmppc_get_gpr(vcpu, rs), |
459 | 4, 1); | 418 | 4, 1); |
460 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 419 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
461 | break; | 420 | break; |
462 | 421 | ||
463 | case OP_STB: | 422 | case OP_STB: |
464 | rs = get_rs(inst); | ||
465 | emulated = kvmppc_handle_store(run, vcpu, | 423 | emulated = kvmppc_handle_store(run, vcpu, |
466 | kvmppc_get_gpr(vcpu, rs), | 424 | kvmppc_get_gpr(vcpu, rs), |
467 | 1, 1); | 425 | 1, 1); |
468 | break; | 426 | break; |
469 | 427 | ||
470 | case OP_STBU: | 428 | case OP_STBU: |
471 | ra = get_ra(inst); | ||
472 | rs = get_rs(inst); | ||
473 | emulated = kvmppc_handle_store(run, vcpu, | 429 | emulated = kvmppc_handle_store(run, vcpu, |
474 | kvmppc_get_gpr(vcpu, rs), | 430 | kvmppc_get_gpr(vcpu, rs), |
475 | 1, 1); | 431 | 1, 1); |
476 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 432 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
477 | break; | 433 | break; |
478 | 434 | ||
479 | case OP_LHZ: | 435 | case OP_LHZ: |
480 | rt = get_rt(inst); | ||
481 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 436 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
482 | break; | 437 | break; |
483 | 438 | ||
484 | case OP_LHZU: | 439 | case OP_LHZU: |
485 | ra = get_ra(inst); | ||
486 | rt = get_rt(inst); | ||
487 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 440 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
488 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 441 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
489 | break; | 442 | break; |
490 | 443 | ||
491 | case OP_LHA: | 444 | case OP_LHA: |
492 | rt = get_rt(inst); | ||
493 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 445 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
494 | break; | 446 | break; |
495 | 447 | ||
496 | case OP_LHAU: | 448 | case OP_LHAU: |
497 | ra = get_ra(inst); | ||
498 | rt = get_rt(inst); | ||
499 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 449 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
500 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 450 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
501 | break; | 451 | break; |
502 | 452 | ||
503 | case OP_STH: | 453 | case OP_STH: |
504 | rs = get_rs(inst); | ||
505 | emulated = kvmppc_handle_store(run, vcpu, | 454 | emulated = kvmppc_handle_store(run, vcpu, |
506 | kvmppc_get_gpr(vcpu, rs), | 455 | kvmppc_get_gpr(vcpu, rs), |
507 | 2, 1); | 456 | 2, 1); |
508 | break; | 457 | break; |
509 | 458 | ||
510 | case OP_STHU: | 459 | case OP_STHU: |
511 | ra = get_ra(inst); | ||
512 | rs = get_rs(inst); | ||
513 | emulated = kvmppc_handle_store(run, vcpu, | 460 | emulated = kvmppc_handle_store(run, vcpu, |
514 | kvmppc_get_gpr(vcpu, rs), | 461 | kvmppc_get_gpr(vcpu, rs), |
515 | 2, 1); | 462 | 2, 1); |
516 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 463 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
517 | break; | 464 | break; |
518 | 465 | ||
519 | default: | 466 | default: |
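Across the load/store cases above, the update-form instructions (lbzux, lhzux, lwzu, sthu, ...) now write vcpu->arch.vaddr_accessed back into rA rather than a locally recomputed ea or the physical paddr_accessed: the MMIO path records the guest effective address when it translates the access, and the update forms architecturally leave that effective address in rA. A sketch of one such case, with emulate_lwzu as an illustrative wrapper that is not part of the patch:

static int emulate_lwzu(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rt, int ra)
{
	int emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);

	/* update form: rA <- effective address of the access */
	kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
	return emulated;
}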
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 00d7e345b3fe..1493c8de947b 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -43,6 +43,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | |||
43 | v->requests; | 43 | v->requests; |
44 | } | 44 | } |
45 | 45 | ||
46 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | ||
47 | { | ||
48 | return 1; | ||
49 | } | ||
50 | |||
46 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | 51 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) |
47 | { | 52 | { |
48 | int nr = kvmppc_get_gpr(vcpu, 11); | 53 | int nr = kvmppc_get_gpr(vcpu, 11); |
@@ -74,7 +79,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | |||
74 | } | 79 | } |
75 | case HC_VENDOR_KVM | KVM_HC_FEATURES: | 80 | case HC_VENDOR_KVM | KVM_HC_FEATURES: |
76 | r = HC_EV_SUCCESS; | 81 | r = HC_EV_SUCCESS; |
77 | #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500) | 82 | #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) |
78 | /* XXX Missing magic page on 44x */ | 83 | /* XXX Missing magic page on 44x */ |
79 | r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); | 84 | r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); |
80 | #endif | 85 | #endif |
@@ -109,6 +114,11 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu) | |||
109 | goto out; | 114 | goto out; |
110 | #endif | 115 | #endif |
111 | 116 | ||
117 | #ifdef CONFIG_KVM_BOOKE_HV | ||
118 | if (!cpu_has_feature(CPU_FTR_EMB_HV)) | ||
119 | goto out; | ||
120 | #endif | ||
121 | |||
112 | r = true; | 122 | r = true; |
113 | 123 | ||
114 | out: | 124 | out: |
@@ -225,7 +235,7 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
225 | case KVM_CAP_PPC_PAIRED_SINGLES: | 235 | case KVM_CAP_PPC_PAIRED_SINGLES: |
226 | case KVM_CAP_PPC_OSI: | 236 | case KVM_CAP_PPC_OSI: |
227 | case KVM_CAP_PPC_GET_PVINFO: | 237 | case KVM_CAP_PPC_GET_PVINFO: |
228 | #ifdef CONFIG_KVM_E500 | 238 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
229 | case KVM_CAP_SW_TLB: | 239 | case KVM_CAP_SW_TLB: |
230 | #endif | 240 | #endif |
231 | r = 1; | 241 | r = 1; |
@@ -234,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
234 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 244 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
235 | break; | 245 | break; |
236 | #endif | 246 | #endif |
237 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 247 | #ifdef CONFIG_PPC_BOOK3S_64 |
238 | case KVM_CAP_SPAPR_TCE: | 248 | case KVM_CAP_SPAPR_TCE: |
239 | r = 1; | 249 | r = 1; |
240 | break; | 250 | break; |
251 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
252 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
241 | case KVM_CAP_PPC_SMT: | 253 | case KVM_CAP_PPC_SMT: |
242 | r = threads_per_core; | 254 | r = threads_per_core; |
243 | break; | 255 | break; |
@@ -267,6 +279,11 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
267 | case KVM_CAP_MAX_VCPUS: | 279 | case KVM_CAP_MAX_VCPUS: |
268 | r = KVM_MAX_VCPUS; | 280 | r = KVM_MAX_VCPUS; |
269 | break; | 281 | break; |
282 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
283 | case KVM_CAP_PPC_GET_SMMU_INFO: | ||
284 | r = 1; | ||
285 | break; | ||
286 | #endif | ||
270 | default: | 287 | default: |
271 | r = 0; | 288 | r = 0; |
272 | break; | 289 | break; |
@@ -588,21 +605,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
588 | return r; | 605 | return r; |
589 | } | 606 | } |
590 | 607 | ||
591 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | ||
592 | { | ||
593 | int me; | ||
594 | int cpu = vcpu->cpu; | ||
595 | |||
596 | me = get_cpu(); | ||
597 | if (waitqueue_active(vcpu->arch.wqp)) { | ||
598 | wake_up_interruptible(vcpu->arch.wqp); | ||
599 | vcpu->stat.halt_wakeup++; | ||
600 | } else if (cpu != me && cpu != -1) { | ||
601 | smp_send_reschedule(vcpu->cpu); | ||
602 | } | ||
603 | put_cpu(); | ||
604 | } | ||
605 | |||
606 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | 608 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
607 | { | 609 | { |
608 | if (irq->irq == KVM_INTERRUPT_UNSET) { | 610 | if (irq->irq == KVM_INTERRUPT_UNSET) { |
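With kvm_arch_vcpu_should_kick() introduced earlier in this file and the arch-private kvm_vcpu_kick() deleted above, the wake-up/IPI decision can live in common code and consult the new hook. The sketch below simply re-folds the removed powerpc logic around that hook; it is not the generic virt/kvm implementation:

static void demo_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int cpu = vcpu->cpu;
	int me = get_cpu();

	if (waitqueue_active(vcpu->arch.wqp)) {
		wake_up_interruptible(vcpu->arch.wqp);
		vcpu->stat.halt_wakeup++;
	} else if (cpu != me && cpu != -1 && kvm_arch_vcpu_should_kick(vcpu)) {
		smp_send_reschedule(cpu);
	}
	put_cpu();
}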
@@ -611,6 +613,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | |||
611 | } | 613 | } |
612 | 614 | ||
613 | kvmppc_core_queue_external(vcpu, irq); | 615 | kvmppc_core_queue_external(vcpu, irq); |
616 | |||
614 | kvm_vcpu_kick(vcpu); | 617 | kvm_vcpu_kick(vcpu); |
615 | 618 | ||
616 | return 0; | 619 | return 0; |
@@ -633,7 +636,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | |||
633 | r = 0; | 636 | r = 0; |
634 | vcpu->arch.papr_enabled = true; | 637 | vcpu->arch.papr_enabled = true; |
635 | break; | 638 | break; |
636 | #ifdef CONFIG_KVM_E500 | 639 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
637 | case KVM_CAP_SW_TLB: { | 640 | case KVM_CAP_SW_TLB: { |
638 | struct kvm_config_tlb cfg; | 641 | struct kvm_config_tlb cfg; |
639 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; | 642 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; |
@@ -710,7 +713,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
710 | break; | 713 | break; |
711 | } | 714 | } |
712 | 715 | ||
713 | #ifdef CONFIG_KVM_E500 | 716 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
714 | case KVM_DIRTY_TLB: { | 717 | case KVM_DIRTY_TLB: { |
715 | struct kvm_dirty_tlb dirty; | 718 | struct kvm_dirty_tlb dirty; |
716 | r = -EFAULT; | 719 | r = -EFAULT; |
@@ -720,7 +723,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
720 | break; | 723 | break; |
721 | } | 724 | } |
722 | #endif | 725 | #endif |
723 | |||
724 | default: | 726 | default: |
725 | r = -EINVAL; | 727 | r = -EINVAL; |
726 | } | 728 | } |
@@ -777,7 +779,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
777 | 779 | ||
778 | break; | 780 | break; |
779 | } | 781 | } |
780 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 782 | #ifdef CONFIG_PPC_BOOK3S_64 |
781 | case KVM_CREATE_SPAPR_TCE: { | 783 | case KVM_CREATE_SPAPR_TCE: { |
782 | struct kvm_create_spapr_tce create_tce; | 784 | struct kvm_create_spapr_tce create_tce; |
783 | struct kvm *kvm = filp->private_data; | 785 | struct kvm *kvm = filp->private_data; |
@@ -788,7 +790,9 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
788 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); | 790 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); |
789 | goto out; | 791 | goto out; |
790 | } | 792 | } |
793 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
791 | 794 | ||
795 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
792 | case KVM_ALLOCATE_RMA: { | 796 | case KVM_ALLOCATE_RMA: { |
793 | struct kvm *kvm = filp->private_data; | 797 | struct kvm *kvm = filp->private_data; |
794 | struct kvm_allocate_rma rma; | 798 | struct kvm_allocate_rma rma; |
@@ -800,6 +804,18 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
800 | } | 804 | } |
801 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | 805 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ |
802 | 806 | ||
807 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
808 | case KVM_PPC_GET_SMMU_INFO: { | ||
809 | struct kvm *kvm = filp->private_data; | ||
810 | struct kvm_ppc_smmu_info info; | ||
811 | |||
812 | memset(&info, 0, sizeof(info)); | ||
813 | r = kvm_vm_ioctl_get_smmu_info(kvm, &info); | ||
814 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) | ||
815 | r = -EFAULT; | ||
816 | break; | ||
817 | } | ||
818 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
803 | default: | 819 | default: |
804 | r = -ENOTTY; | 820 | r = -ENOTTY; |
805 | } | 821 | } |
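The new VM ioctl fills a kvm_ppc_smmu_info structure and copies it back to the caller. A hedged userspace sketch of querying it, assuming vm_fd is an already-created KVM VM file descriptor:

#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <err.h>

static void demo_query_smmu(int vm_fd)
{
	struct kvm_ppc_smmu_info info;

	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
		err(1, "KVM_PPC_GET_SMMU_INFO");
	/* info now describes the MMU model KVM exposes to the guest */
}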
@@ -808,6 +824,40 @@ out: | |||
808 | return r; | 824 | return r; |
809 | } | 825 | } |
810 | 826 | ||
827 | static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; | ||
828 | static unsigned long nr_lpids; | ||
829 | |||
830 | long kvmppc_alloc_lpid(void) | ||
831 | { | ||
832 | long lpid; | ||
833 | |||
834 | do { | ||
835 | lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); | ||
836 | if (lpid >= nr_lpids) { | ||
837 | pr_err("%s: No LPIDs free\n", __func__); | ||
838 | return -ENOMEM; | ||
839 | } | ||
840 | } while (test_and_set_bit(lpid, lpid_inuse)); | ||
841 | |||
842 | return lpid; | ||
843 | } | ||
844 | |||
845 | void kvmppc_claim_lpid(long lpid) | ||
846 | { | ||
847 | set_bit(lpid, lpid_inuse); | ||
848 | } | ||
849 | |||
850 | void kvmppc_free_lpid(long lpid) | ||
851 | { | ||
852 | clear_bit(lpid, lpid_inuse); | ||
853 | } | ||
854 | |||
855 | void kvmppc_init_lpid(unsigned long nr_lpids_param) | ||
856 | { | ||
857 | nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); | ||
858 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); | ||
859 | } | ||
860 | |||
811 | int kvm_arch_init(void *opaque) | 861 | int kvm_arch_init(void *opaque) |
812 | { | 862 | { |
813 | return 0; | 863 | return 0; |
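The LPID allocator added above keeps a simple in-use bitmap sized by KVMPPC_NR_LPIDS and clamped at init time by kvmppc_init_lpid(); a core that programs LPIDR would typically call kvmppc_init_lpid() once with the number of hardware LPIDs and then allocate one LPID per VM. A minimal usage sketch (the demo_* helpers and the kvm->arch.lpid field are assumptions for illustration, not code from the patch):

static int demo_core_init_vm(struct kvm *kvm)
{
	long lpid = kvmppc_alloc_lpid();

	if (lpid < 0)
		return lpid;		/* -ENOMEM once the bitmap is exhausted */

	kvm->arch.lpid = lpid;		/* assumed field, for illustration */
	return 0;
}

static void demo_core_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
}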
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h index 8167d42a776f..bf191e72b2d8 100644 --- a/arch/powerpc/kvm/timing.h +++ b/arch/powerpc/kvm/timing.h | |||
@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) | |||
93 | case SIGNAL_EXITS: | 93 | case SIGNAL_EXITS: |
94 | vcpu->stat.signal_exits++; | 94 | vcpu->stat.signal_exits++; |
95 | break; | 95 | break; |
96 | case DBELL_EXITS: | ||
97 | vcpu->stat.dbell_exits++; | ||
98 | break; | ||
99 | case GDBELL_EXITS: | ||
100 | vcpu->stat.gdbell_exits++; | ||
101 | break; | ||
96 | } | 102 | } |
97 | } | 103 | } |
98 | 104 | ||