Diffstat (limited to 'arch/powerpc/kvm')
 arch/powerpc/kvm/44x.c                   |  10
 arch/powerpc/kvm/44x_tlb.c               |   9
 arch/powerpc/kvm/Makefile                |   2
 arch/powerpc/kvm/book3s.c                | 272
 arch/powerpc/kvm/book3s_32_mmu.c         | 111
 arch/powerpc/kvm/book3s_32_mmu_host.c    |  75
 arch/powerpc/kvm/book3s_64_mmu.c         |  42
 arch/powerpc/kvm/book3s_64_mmu_host.c    |  74
 arch/powerpc/kvm/book3s_emulate.c        |  73
 arch/powerpc/kvm/book3s_mmu_hpte.c       | 140
 arch/powerpc/kvm/book3s_paired_singles.c |  55
 arch/powerpc/kvm/book3s_rmhandlers.S     |  32
 arch/powerpc/kvm/booke.c                 | 108
 arch/powerpc/kvm/booke.h                 |  10
 arch/powerpc/kvm/booke_emulate.c         |  14
 arch/powerpc/kvm/booke_interrupts.S      |   3
 arch/powerpc/kvm/e500.c                  |   7
 arch/powerpc/kvm/e500_tlb.c              |  18
 arch/powerpc/kvm/e500_tlb.h              |   2
 arch/powerpc/kvm/emulate.c               |  40
 arch/powerpc/kvm/fpu.S                   |   8
 arch/powerpc/kvm/powerpc.c               |  88
 arch/powerpc/kvm/trace.h                 | 239
 23 files changed, 1007 insertions(+), 425 deletions(-)
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 73c0a3f64ed1..74d0e7421143 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -43,7 +43,7 @@ int kvmppc_core_check_processor_compat(void)
 {
         int r;
 
-        if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
+        if (strncmp(cur_cpu_spec->platform, "ppc440", 6) == 0)
                 r = 0;
         else
                 r = -ENOTSUPP;
@@ -72,6 +72,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
         /* Since the guest can directly access the timebase, it must know the
          * real timebase frequency. Accordingly, it must see the state of
          * CCR1[TCS]. */
+        /* XXX CCR1 doesn't exist on all 440 SoCs. */
         vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
 
         for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++)
@@ -123,8 +124,14 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
         if (err)
                 goto free_vcpu;
 
+        vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+        if (!vcpu->arch.shared)
+                goto uninit_vcpu;
+
         return vcpu;
 
+uninit_vcpu:
+        kvm_vcpu_uninit(vcpu);
 free_vcpu:
         kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 out:
@@ -135,6 +142,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
         struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 
+        free_page((unsigned long)vcpu->arch.shared);
         kvm_vcpu_uninit(vcpu);
         kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 }
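The strcmp() to strncmp(..., 6) change in the first hunk turns the processor compatibility test into a prefix match, so any platform string that merely begins with "ppc440" now qualifies instead of only the exact string "ppc440". A minimal user-space sketch of the same predicate (the sample strings are hypothetical):

    #include <string.h>

    /* Mirrors the check above: compare only the first six characters. */
    static int is_440_platform(const char *platform)
    {
            return strncmp(platform, "ppc440", 6) == 0;
    }

    /* is_440_platform("ppc440")   -> 1
     * is_440_platform("ppc440GP") -> 1  (accepted only since this change)
     * is_440_platform("ppc405")   -> 0 */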
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 9b9b5cdea840..5f3cff83e089 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -47,6 +47,7 @@
 #ifdef DEBUG
 void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         struct kvmppc_44x_tlbe *tlbe;
         int i;
 
@@ -221,14 +222,14 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
 
 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-        unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
         return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
 int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-        unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
         return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
@@ -354,7 +355,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
 
         stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
         stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
-                                                   vcpu->arch.msr & MSR_PR);
+                                                   vcpu->arch.shared->msr & MSR_PR);
         stlbe.tid = !(asid & 0xff);
 
         /* Keep track of the reference so we can properly release it later. */
@@ -423,7 +424,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 
         /* Does it match current guest AS? */
         /* XXX what about IS != DS? */
-        if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+        if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
                 return 0;
 
         gpa = get_tlb_raddr(tlbe);
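Throughout this file the guest MSR is now read from the shared page (vcpu->arch.shared->msr) rather than from vcpu->arch.msr; the lookup logic itself is unchanged. A small sketch of the address-space derivation used by the itlb/dtlb index functions, assuming MSR_IS and MSR_DS are single-bit masks as in the kernel headers:

    /* "!!" collapses the masked bit to 0 or 1, giving the AS key that
     * kvmppc_44x_tlb_index() matches against a TLB entry's TS bit. */
    static unsigned int msr_address_space(unsigned long msr, unsigned long mask)
    {
            return !!(msr & mask);
    }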
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index d45c818a384c..4d6863823f69 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -4,7 +4,7 @@
 
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
-EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
+ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
 
 common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index a3cef30d1d42..e316847c08c0 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -17,6 +17,7 @@
 #include <linux/kvm_host.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include "trace.h"
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -35,7 +36,6 @@
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
 /* #define EXIT_DEBUG */
-/* #define EXIT_DEBUG_SIMPLE */
 /* #define DEBUG_EXT */
 
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
@@ -105,65 +105,71 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
         kvmppc_giveup_ext(vcpu, MSR_VSX);
 }
 
-#if defined(EXIT_DEBUG)
-static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
-{
-        u64 jd = mftb() - vcpu->arch.dec_jiffies;
-        return vcpu->arch.dec - jd;
-}
-#endif
-
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-        vcpu->arch.shadow_msr = vcpu->arch.msr;
+        ulong smsr = vcpu->arch.shared->msr;
+
         /* Guest MSR values */
-        vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
-                                 MSR_BE | MSR_DE;
+        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
         /* Process MSR values */
-        vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
-                                 MSR_EE;
+        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
         /* External providers the guest reserved */
-        vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
         /* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
-        vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+        smsr |= MSR_ISF | MSR_HV;
 #endif
+        vcpu->arch.shadow_msr = smsr;
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
-        ulong old_msr = vcpu->arch.msr;
+        ulong old_msr = vcpu->arch.shared->msr;
 
 #ifdef EXIT_DEBUG
         printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif
 
         msr &= to_book3s(vcpu)->msr_mask;
-        vcpu->arch.msr = msr;
+        vcpu->arch.shared->msr = msr;
         kvmppc_recalc_shadow_msr(vcpu);
 
-        if (msr & (MSR_WE|MSR_POW)) {
+        if (msr & MSR_POW) {
                 if (!vcpu->arch.pending_exceptions) {
                         kvm_vcpu_block(vcpu);
                         vcpu->stat.halt_wakeup++;
+
+                        /* Unset POW bit after we woke up */
+                        msr &= ~MSR_POW;
+                        vcpu->arch.shared->msr = msr;
                 }
         }
 
-        if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
                    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                 kvmppc_mmu_flush_segments(vcpu);
                 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+
+                /* Preload magic page segment when in kernel mode */
+                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
+                        struct kvm_vcpu_arch *a = &vcpu->arch;
+
+                        if (msr & MSR_DR)
+                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
+                        else
+                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
+                }
         }
 
         /* Preload FPU if it's enabled */
-        if (vcpu->arch.msr & MSR_FP)
+        if (vcpu->arch.shared->msr & MSR_FP)
                 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-        vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
-        vcpu->arch.srr1 = vcpu->arch.msr | flags;
+        vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
+        vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
         kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
         vcpu->arch.mmu.reset_msr(vcpu);
 }
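The rewritten kvmppc_recalc_shadow_msr() above builds the shadow MSR in a local variable and publishes it with one store at the end. A standalone sketch of the same mask logic, using the kernel's MSR_* constants but a made-up helper name:

    /* Guest-controlled bits are filtered in, host-mandated bits are forced
     * on, and facility bits the guest currently owns (e.g. MSR_FP) pass
     * through unchanged. */
    static unsigned long compute_shadow_msr(unsigned long guest_msr,
                                            unsigned long guest_owned_ext)
    {
            unsigned long smsr = guest_msr;

            smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
            smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
            smsr |= guest_msr & guest_owned_ext;
            return smsr;
    }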
@@ -180,6 +186,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
         case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
         case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
         case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
+        case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
         case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
         case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
         case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
@@ -199,6 +206,9 @@ static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
 {
         clear_bit(kvmppc_book3s_vec2irqprio(vec),
                   &vcpu->arch.pending_exceptions);
+
+        if (!vcpu->arch.pending_exceptions)
+                vcpu->arch.shared->int_pending = 0;
 }
 
 void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
@@ -237,13 +247,19 @@ void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                 struct kvm_interrupt *irq)
 {
-        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
+        unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
+
+        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
+                vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
+
+        kvmppc_book3s_queue_irqprio(vcpu, vec);
 }
 
 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                   struct kvm_interrupt *irq)
 {
         kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
+        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 }
 
 int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
@@ -251,14 +267,29 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
         int deliver = 1;
         int vec = 0;
         ulong flags = 0ULL;
+        ulong crit_raw = vcpu->arch.shared->critical;
+        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
+        bool crit;
+
+        /* Truncate crit indicators in 32 bit mode */
+        if (!(vcpu->arch.shared->msr & MSR_SF)) {
+                crit_raw &= 0xffffffff;
+                crit_r1 &= 0xffffffff;
+        }
+
+        /* Critical section when crit == r1 */
+        crit = (crit_raw == crit_r1);
+        /* ... and we're in supervisor mode */
+        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
 
         switch (priority) {
         case BOOK3S_IRQPRIO_DECREMENTER:
-                deliver = vcpu->arch.msr & MSR_EE;
+                deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
                 vec = BOOK3S_INTERRUPT_DECREMENTER;
                 break;
         case BOOK3S_IRQPRIO_EXTERNAL:
-                deliver = vcpu->arch.msr & MSR_EE;
+        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
+                deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
                 vec = BOOK3S_INTERRUPT_EXTERNAL;
                 break;
         case BOOK3S_IRQPRIO_SYSTEM_RESET:
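The crit logic added above implements a paravirt critical-section convention: the guest stores a marker (its r1 stack pointer) into the shared page's critical field, and interrupt delivery is held off while r1 still equals that marker and the vcpu is in supervisor mode. A minimal sketch of the predicate under the same 32-bit truncation rule:

    /* Compare only the low 32 bits for a guest without MSR_SF (64-bit
     * mode), and never treat user mode (MSR_PR set) as critical. */
    static bool in_guest_critical(u64 msr, u64 crit_marker, u64 r1)
    {
            if (!(msr & MSR_SF)) {
                    crit_marker &= 0xffffffff;
                    r1 &= 0xffffffff;
            }
            return (crit_marker == r1) && !(msr & MSR_PR);
    }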
@@ -320,9 +351,27 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
         return deliver;
 }
 
+/*
+ * This function determines if an irqprio should be cleared once issued.
+ */
+static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+        switch (priority) {
+        case BOOK3S_IRQPRIO_DECREMENTER:
+                /* DEC interrupts get cleared by mtdec */
+                return false;
+        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
+                /* External interrupts get cleared by userspace */
+                return false;
+        }
+
+        return true;
+}
+
 void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 {
         unsigned long *pending = &vcpu->arch.pending_exceptions;
+        unsigned long old_pending = vcpu->arch.pending_exceptions;
         unsigned int priority;
 
 #ifdef EXIT_DEBUG
@@ -332,8 +381,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
         priority = __ffs(*pending);
         while (priority < BOOK3S_IRQPRIO_MAX) {
                 if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
-                    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
-                        /* DEC interrupts get cleared by mtdec */
+                    clear_irqprio(vcpu, priority)) {
                         clear_bit(priority, &vcpu->arch.pending_exceptions);
                         break;
                 }
@@ -342,6 +390,12 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
                                  BITS_PER_BYTE * sizeof(*pending),
                                  priority + 1);
         }
+
+        /* Tell the guest about our interrupt status */
+        if (*pending)
+                vcpu->arch.shared->int_pending = 1;
+        else if (old_pending)
+                vcpu->arch.shared->int_pending = 0;
 }
 
 void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
@@ -398,6 +452,25 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
         }
 }
 
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+        ulong mp_pa = vcpu->arch.magic_page_pa;
+
+        /* Magic page override */
+        if (unlikely(mp_pa) &&
+            unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
+                     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
+                ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+                pfn_t pfn;
+
+                pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
+                get_page(pfn_to_page(pfn));
+                return pfn;
+        }
+
+        return gfn_to_pfn(vcpu->kvm, gfn);
+}
+
 /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
  * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
  * emulate 32 bytes dcbz length.
@@ -415,8 +488,10 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
         int i;
 
         hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
-        if (is_error_page(hpage))
+        if (is_error_page(hpage)) {
+                kvm_release_page_clean(hpage);
                 return;
+        }
 
         hpage_offset = pte->raddr & ~PAGE_MASK;
         hpage_offset &= ~0xFFFULL;
@@ -437,14 +512,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
                         struct kvmppc_pte *pte)
 {
-        int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
+        int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
         int r;
 
         if (relocated) {
                 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
         } else {
                 pte->eaddr = eaddr;
-                pte->raddr = eaddr & 0xffffffff;
+                pte->raddr = eaddr & KVM_PAM;
                 pte->vpage = VSID_REAL | eaddr >> 12;
                 pte->may_read = true;
                 pte->may_write = true;
@@ -533,6 +608,13 @@ mmio:
 
 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
+        ulong mp_pa = vcpu->arch.magic_page_pa;
+
+        if (unlikely(mp_pa) &&
+            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+                return 1;
+        }
+
         return kvm_is_visible_gfn(vcpu->kvm, gfn);
 }
 
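kvmppc_gfn_to_pfn() and kvmppc_visible_gfn() above both special-case the magic page: when the guest touches the frame configured in magic_page_pa, the host hands back its own shared page rather than consulting the memslots. A worked example of the frame comparison, with a hypothetical magic page at guest physical 0xfffff000 and PAGE_SHIFT == 12:

    ulong mp_pa = 0xfffff000;       /* hypothetical example value */
    gfn_t gfn = 0xfffff;            /* frame the guest is accessing */

    /* (0xfffff000 & KVM_PAM) >> 12 == 0xfffff, so the override fires
     * for any offset within that 4 KiB frame. */
    int is_magic = ((mp_pa & KVM_PAM) >> PAGE_SHIFT) == gfn;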
@@ -545,8 +627,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         int page_found = 0;
         struct kvmppc_pte pte;
         bool is_mmio = false;
-        bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-        bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
         u64 vsid;
 
         relocated = data ? dr : ir;
@@ -558,12 +640,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 pte.may_execute = true;
                 pte.may_read = true;
                 pte.may_write = true;
-                pte.raddr = eaddr & 0xffffffff;
+                pte.raddr = eaddr & KVM_PAM;
                 pte.eaddr = eaddr;
                 pte.vpage = eaddr >> 12;
         }
 
-        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
         case 0:
                 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                 break;
@@ -571,7 +653,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         case MSR_IR:
                 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-                if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                         pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                 else
                         pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -594,20 +676,23 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
         if (page_found == -ENOENT) {
                 /* Page not found in guest PTE entries */
-                vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
-                to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
-                vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+                vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+                vcpu->arch.shared->msr |=
+                        (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EPERM) {
                 /* Storage protection */
-                vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
-                to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
-                to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-                vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+                vcpu->arch.shared->dsisr =
+                        to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+                vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
+                vcpu->arch.shared->msr |=
+                        (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EINVAL) {
                 /* Page not found in guest SLB */
-                vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
         } else if (!is_mmio &&
                    kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -695,9 +780,11 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 
         ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
         if (ret == -ENOENT) {
-                vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
-                vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
-                vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+                ulong msr = vcpu->arch.shared->msr;
+
+                msr = kvmppc_set_field(msr, 33, 33, 1);
+                msr = kvmppc_set_field(msr, 34, 36, 0);
+                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                 return EMULATE_AGAIN;
         }
@@ -736,7 +823,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
         if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                 return RESUME_GUEST;
 
-        if (!(vcpu->arch.msr & msr)) {
+        if (!(vcpu->arch.shared->msr & msr)) {
                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 return RESUME_GUEST;
         }
@@ -796,16 +883,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
         run->exit_reason = KVM_EXIT_UNKNOWN;
         run->ready_for_interrupt_injection = 1;
-#ifdef EXIT_DEBUG
-        printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
-                exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
-                kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
-#elif defined (EXIT_DEBUG_SIMPLE)
-        if ((exit_nr != 0x900) && (exit_nr != 0x500))
-                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
-                        exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
-                        vcpu->arch.msr);
-#endif
+
+        trace_kvm_book3s_exit(exit_nr, vcpu);
         kvm_resched(vcpu);
         switch (exit_nr) {
         case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -836,9 +915,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                         r = RESUME_GUEST;
                 } else {
-                        vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+                        vcpu->arch.shared->msr |=
+                                to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                         r = RESUME_GUEST;
                 }
                 break;
@@ -861,17 +940,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
                         r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                 } else {
-                        vcpu->arch.dear = dar;
-                        to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
+                        vcpu->arch.shared->dar = dar;
+                        vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-                        kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
                         r = RESUME_GUEST;
                 }
                 break;
         }
         case BOOK3S_INTERRUPT_DATA_SEGMENT:
                 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
-                        vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+                        vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                         kvmppc_book3s_queue_irqprio(vcpu,
                                 BOOK3S_INTERRUPT_DATA_SEGMENT);
                 }
@@ -904,7 +982,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 program_interrupt:
                 flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
-                if (vcpu->arch.msr & MSR_PR) {
+                if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
                         printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
@@ -941,10 +1019,10 @@ program_interrupt:
                 break;
         }
         case BOOK3S_INTERRUPT_SYSCALL:
-                // XXX make user settable
                 if (vcpu->arch.osi_enabled &&
                     (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                     (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+                        /* MOL hypercalls */
                         u64 *gprs = run->osi.gprs;
                         int i;
 
@@ -953,8 +1031,13 @@ program_interrupt:
                                 gprs[i] = kvmppc_get_gpr(vcpu, i);
                         vcpu->arch.osi_needed = 1;
                         r = RESUME_HOST_NV;
-
+                } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
+                        /* KVM PV hypercalls */
+                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+                        r = RESUME_GUEST;
                 } else {
+                        /* Guest syscalls */
                         vcpu->stat.syscall_exits++;
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                         r = RESUME_GUEST;
@@ -989,9 +1072,9 @@ program_interrupt:
         }
         case BOOK3S_INTERRUPT_ALIGNMENT:
                 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-                        to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
+                        vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
                                 kvmppc_get_last_inst(vcpu));
-                        vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
+                        vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
                                 kvmppc_get_last_inst(vcpu));
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 }
@@ -1031,9 +1114,7 @@ program_interrupt:
                 }
         }
 
-#ifdef EXIT_DEBUG
-        printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
-#endif
+        trace_kvm_book3s_reenter(r, vcpu);
 
         return r;
 }
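The BOOK3S_INTERRUPT_SYSCALL handler above now separates three cases: Mac-on-Linux (OSI) hypercalls, KVM paravirt hypercalls flagged by a magic value in r0 from kernel mode, and ordinary guest syscalls, which are still reflected back into the guest. A condensed sketch of that classification order (the helper and enum are made up; the magic constants are the kernel's):

    enum sc_class { SC_OSI, SC_KVM_PV, SC_GUEST };

    static enum sc_class classify_syscall(u32 r0, u32 r3, u32 r4,
                                          bool osi_enabled, bool user_mode)
    {
            if (osi_enabled && r3 == OSI_SC_MAGIC_R3 && r4 == OSI_SC_MAGIC_R4)
                    return SC_OSI;          /* MOL hypercall */
            if (!user_mode && r0 == KVM_SC_MAGIC_R0)
                    return SC_KVM_PV;       /* KVM PV hypercall */
            return SC_GUEST;                /* reflect into the guest */
    }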
@@ -1052,14 +1133,14 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         regs->ctr = kvmppc_get_ctr(vcpu);
         regs->lr = kvmppc_get_lr(vcpu);
         regs->xer = kvmppc_get_xer(vcpu);
-        regs->msr = vcpu->arch.msr;
-        regs->srr0 = vcpu->arch.srr0;
-        regs->srr1 = vcpu->arch.srr1;
+        regs->msr = vcpu->arch.shared->msr;
+        regs->srr0 = vcpu->arch.shared->srr0;
+        regs->srr1 = vcpu->arch.shared->srr1;
         regs->pid = vcpu->arch.pid;
-        regs->sprg0 = vcpu->arch.sprg0;
-        regs->sprg1 = vcpu->arch.sprg1;
-        regs->sprg2 = vcpu->arch.sprg2;
-        regs->sprg3 = vcpu->arch.sprg3;
+        regs->sprg0 = vcpu->arch.shared->sprg0;
+        regs->sprg1 = vcpu->arch.shared->sprg1;
+        regs->sprg2 = vcpu->arch.shared->sprg2;
+        regs->sprg3 = vcpu->arch.shared->sprg3;
         regs->sprg5 = vcpu->arch.sprg4;
         regs->sprg6 = vcpu->arch.sprg5;
         regs->sprg7 = vcpu->arch.sprg6;
@@ -1080,12 +1161,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         kvmppc_set_lr(vcpu, regs->lr);
         kvmppc_set_xer(vcpu, regs->xer);
         kvmppc_set_msr(vcpu, regs->msr);
-        vcpu->arch.srr0 = regs->srr0;
-        vcpu->arch.srr1 = regs->srr1;
-        vcpu->arch.sprg0 = regs->sprg0;
-        vcpu->arch.sprg1 = regs->sprg1;
-        vcpu->arch.sprg2 = regs->sprg2;
-        vcpu->arch.sprg3 = regs->sprg3;
+        vcpu->arch.shared->srr0 = regs->srr0;
+        vcpu->arch.shared->srr1 = regs->srr1;
+        vcpu->arch.shared->sprg0 = regs->sprg0;
+        vcpu->arch.shared->sprg1 = regs->sprg1;
+        vcpu->arch.shared->sprg2 = regs->sprg2;
+        vcpu->arch.shared->sprg3 = regs->sprg3;
         vcpu->arch.sprg5 = regs->sprg4;
         vcpu->arch.sprg6 = regs->sprg5;
         vcpu->arch.sprg7 = regs->sprg6;
@@ -1111,10 +1192,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                         sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
                 }
         } else {
-                for (i = 0; i < 16; i++) {
-                        sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
-                        sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
-                }
+                for (i = 0; i < 16; i++)
+                        sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+
                 for (i = 0; i < 8; i++) {
                         sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                         sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
@@ -1225,6 +1305,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
         struct kvmppc_vcpu_book3s *vcpu_book3s;
         struct kvm_vcpu *vcpu;
         int err = -ENOMEM;
+        unsigned long p;
 
         vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
         if (!vcpu_book3s)
@@ -1242,6 +1323,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
         if (err)
                 goto free_shadow_vcpu;
 
+        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+        /* the real shared page fills the last 4k of our page */
+        vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
+        if (!p)
+                goto uninit_vcpu;
+
         vcpu->arch.host_retip = kvm_return_point;
         vcpu->arch.host_msr = mfmsr();
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -1268,10 +1355,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 
         err = kvmppc_mmu_init(vcpu);
         if (err < 0)
-                goto free_shadow_vcpu;
+                goto uninit_vcpu;
 
         return vcpu;
 
+uninit_vcpu:
+        kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
         kfree(vcpu_book3s->shadow_vcpu);
 free_vcpu:
@@ -1284,6 +1373,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
         struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
+        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
         kvm_vcpu_uninit(vcpu);
         kfree(vcpu_book3s->shadow_vcpu);
         vfree(vcpu_book3s);
@@ -1346,7 +1436,7 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         local_irq_enable();
 
         /* Preload FPU if it's enabled */
-        if (vcpu->arch.msr & MSR_FP)
+        if (vcpu->arch.shared->msr & MSR_FP)
                 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
         ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
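Unlike the 44x code, the Book3S vcpu_create hunk above allocates a full host page and points arch.shared at its last 4 KiB ("the real shared page fills the last 4k of our page"), presumably so the structure remains usable as a 4 KiB guest magic page even when the host page size is larger; the matching vcpu_free masks the pointer back down with PAGE_MASK before freeing. A sketch of the pointer arithmetic:

    /* Allocation: place the 4 KiB shared struct at the end of the page. */
    unsigned long p = __get_free_page(GFP_KERNEL | __GFP_ZERO);
    void *shared = (void *)(p + PAGE_SIZE - 4096);

    /* Teardown: mask the offset back off to recover the page start. */
    free_page((unsigned long)shared & PAGE_MASK);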
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 3292d76101d2..c8cefdd15fd8 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -58,14 +58,39 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline u32 sr_vsid(u32 sr_raw)
+{
+        return sr_raw & 0x0fffffff;
+}
+
+static inline bool sr_valid(u32 sr_raw)
+{
+        return (sr_raw & 0x80000000) ? false : true;
+}
+
+static inline bool sr_ks(u32 sr_raw)
+{
+        return (sr_raw & 0x40000000) ? true: false;
+}
+
+static inline bool sr_kp(u32 sr_raw)
+{
+        return (sr_raw & 0x20000000) ? true: false;
+}
+
+static inline bool sr_nx(u32 sr_raw)
+{
+        return (sr_raw & 0x10000000) ? true: false;
+}
+
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
                                           struct kvmppc_pte *pte, bool data);
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                              u64 *vsid);
 
-static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
+static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-        return &vcpu_book3s->sr[(eaddr >> 28) & 0xf];
+        return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf];
 }
 
 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
@@ -87,7 +112,7 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
 }
 
 static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
-                                           struct kvmppc_sr *sre, gva_t eaddr,
+                                           u32 sre, gva_t eaddr,
                                            bool primary)
 {
         u32 page, hash, pteg, htabmask;
@@ -96,7 +121,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
         page = (eaddr & 0x0FFFFFFF) >> 12;
         htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0;
 
-        hash = ((sre->vsid ^ page) << 6);
+        hash = ((sr_vsid(sre) ^ page) << 6);
         if (!primary)
                 hash = ~hash;
         hash &= htabmask;
@@ -104,8 +129,8 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
         pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash;
 
         dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n",
-                vcpu_book3s->vcpu.arch.pc, eaddr, vcpu_book3s->sdr1, pteg,
-                sre->vsid);
+                kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
+                sr_vsid(sre));
 
         r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
         if (kvm_is_error_hva(r))
@@ -113,10 +138,9 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
         return r | (pteg & ~PAGE_MASK);
 }
 
-static u32 kvmppc_mmu_book3s_32_get_ptem(struct kvmppc_sr *sre, gva_t eaddr,
-                                         bool primary)
+static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
 {
-        return ((eaddr & 0x0fffffff) >> 22) | (sre->vsid << 7) |
+        return ((eaddr & 0x0fffffff) >> 22) | (sr_vsid(sre) << 7) |
                 (primary ? 0 : 0x40) | 0x80000000;
 }
 
@@ -133,7 +157,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
                 else
                         bat = &vcpu_book3s->ibat[i];
 
-                if (vcpu->arch.msr & MSR_PR) {
+                if (vcpu->arch.shared->msr & MSR_PR) {
                         if (!bat->vp)
                                 continue;
                 } else {
@@ -180,17 +204,17 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                                           bool primary)
 {
         struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
-        struct kvmppc_sr *sre;
+        u32 sre;
         hva_t ptegp;
         u32 pteg[16];
         u32 ptem = 0;
         int i;
         int found = 0;
 
-        sre = find_sr(vcpu_book3s, eaddr);
+        sre = find_sr(vcpu, eaddr);
 
         dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28,
-                    sre->vsid, sre->raw);
+                    sr_vsid(sre), sre);
 
         pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
 
@@ -214,8 +238,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                         pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
                         pp = pteg[i+1] & 3;
 
-                        if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) ||
-                            (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
+                        if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) ||
+                            (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR)))
                                 pp |= 4;
 
                         pte->may_write = false;
@@ -269,7 +293,7 @@ no_page_found:
                 dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n",
                             to_book3s(vcpu)->sdr1, ptegp);
                 for (i=0; i<16; i+=2) {
-                        dprintk_pte("   %02d: 0x%x - 0x%x (0x%llx)\n",
+                        dprintk_pte("   %02d: 0x%x - 0x%x (0x%x)\n",
                                     i, pteg[i], pteg[i+1], ptem);
                 }
         }
@@ -281,8 +305,24 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                       struct kvmppc_pte *pte, bool data)
 {
         int r;
+        ulong mp_ea = vcpu->arch.magic_page_ea;
 
         pte->eaddr = eaddr;
+
+        /* Magic page override */
+        if (unlikely(mp_ea) &&
+            unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+            !(vcpu->arch.shared->msr & MSR_PR)) {
+                pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
+                pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
+                pte->raddr &= KVM_PAM;
+                pte->may_execute = true;
+                pte->may_read = true;
+                pte->may_write = true;
+
+                return 0;
+        }
+
         r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
         if (r < 0)
                 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
@@ -295,30 +335,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
 {
-        return to_book3s(vcpu)->sr[srnum].raw;
+        return vcpu->arch.shared->sr[srnum];
 }
 
 static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
                                         ulong value)
 {
-        struct kvmppc_sr *sre;
-
-        sre = &to_book3s(vcpu)->sr[srnum];
-
-        /* Flush any left-over shadows from the previous SR */
-
-        /* XXX Not necessary? */
-        /* kvmppc_mmu_pte_flush(vcpu, ((u64)sre->vsid) << 28, 0xf0000000ULL); */
-
-        /* And then put in the new SR */
-        sre->raw = value;
-        sre->vsid = (value & 0x0fffffff);
-        sre->valid = (value & 0x80000000) ? false : true;
-        sre->Ks = (value & 0x40000000) ? true : false;
-        sre->Kp = (value & 0x20000000) ? true : false;
-        sre->nx = (value & 0x10000000) ? true : false;
-
-        /* Map the new segment */
+        vcpu->arch.shared->sr[srnum] = value;
         kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
 }
 
@@ -331,19 +354,19 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                              u64 *vsid)
 {
         ulong ea = esid << SID_SHIFT;
-        struct kvmppc_sr *sr;
+        u32 sr;
         u64 gvsid = esid;
 
-        if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-                sr = find_sr(to_book3s(vcpu), ea);
-                if (sr->valid)
-                        gvsid = sr->vsid;
+        if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+                sr = find_sr(vcpu, ea);
+                if (sr_valid(sr))
+                        gvsid = sr_vsid(sr);
         }
 
         /* In case we only have one of MSR_IR or MSR_DR set, let's put
            that in the real-mode context (and hope RM doesn't access
            high memory) */
-        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
         case 0:
                 *vsid = VSID_REAL | esid;
                 break;
@@ -354,8 +377,8 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                 *vsid = VSID_REAL_DR | gvsid;
                 break;
         case MSR_DR|MSR_IR:
-                if (sr->valid)
-                        *vsid = sr->vsid;
+                if (sr_valid(sr))
+                        *vsid = sr_vsid(sr);
                 else
                         *vsid = VSID_BAT | gvsid;
                 break;
@@ -363,7 +386,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                 BUG();
         }
 
-        if (vcpu->arch.msr & MSR_PR)
+        if (vcpu->arch.shared->msr & MSR_PR)
                 *vsid |= VSID_PR;
 
         return 0;
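The sr_*() helpers introduced at the top of this file decode the raw 32-bit segment-register image that now lives in the shared page, replacing the old pre-decoded struct kvmppc_sr. A usage sketch with a made-up register value:

    u32 sr_raw = 0x2000abcd;        /* hypothetical SR image */

    bool valid = sr_valid(sr_raw);  /* true:  bit 0x80000000 is clear */
    bool kp    = sr_kp(sr_raw);     /* true:  bit 0x20000000 is set */
    bool ks    = sr_ks(sr_raw);     /* false: bit 0x40000000 is clear */
    u32  vsid  = sr_vsid(sr_raw);   /* 0x0000abcd: the low 28 bits */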
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 0b51ef872c1e..9fecbfbce773 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -19,7 +19,6 @@
  */
 
 #include <linux/kvm_host.h>
-#include <linux/hash.h>
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -77,7 +76,14 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-        return hash_64(gvsid, SID_MAP_BITS);
+        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
+                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
+                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
+                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
+                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
+                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
+                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
+                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
 }
 
 
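With <linux/hash.h> no longer included here, kvmppc_sid_hash() above open-codes an XOR fold: the 64-bit gvsid is cut into SID_MAP_BITS-wide chunks that are XORed together. The unrolled expression is equivalent to this loop form (a sketch, assuming eight chunks cover the bits of interest, as the unrolled version does):

    static u16 sid_hash(u64 gvsid)
    {
            u16 hash = 0;
            int i;

            /* XOR-fold gvsid into a single SID_MAP_BITS-wide value. */
            for (i = 0; i < 8; i++)
                    hash ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;

            return hash;
    }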
@@ -86,7 +92,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
         struct kvmppc_sid_map *map;
         u16 sid_map_mask;
 
-        if (vcpu->arch.msr & MSR_PR)
+        if (vcpu->arch.shared->msr & MSR_PR)
                 gvsid |= VSID_PR;
 
         sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -147,8 +153,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
         struct hpte_cache *pte;
 
         /* Get host physical address for gpa */
-        hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
-        if (kvm_is_error_hva(hpaddr)) {
+        hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+        if (is_error_pfn(hpaddr)) {
                 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
                                  orig_pte->eaddr);
                 return -EINVAL;
@@ -253,7 +259,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
         u16 sid_map_mask;
         static int backwards_map = 0;
 
-        if (vcpu->arch.msr & MSR_PR)
+        if (vcpu->arch.shared->msr & MSR_PR)
                 gvsid |= VSID_PR;
 
         /* We might get collisions that trap in preceding order, so let's
@@ -269,18 +275,15 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
269 | backwards_map = !backwards_map; | 275 | backwards_map = !backwards_map; |
270 | 276 | ||
271 | /* Uh-oh ... out of mappings. Let's flush! */ | 277 | /* Uh-oh ... out of mappings. Let's flush! */ |
272 | if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) { | 278 | if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) { |
273 | vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; | 279 | vcpu_book3s->vsid_next = 0; |
274 | memset(vcpu_book3s->sid_map, 0, | 280 | memset(vcpu_book3s->sid_map, 0, |
275 | sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); | 281 | sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); |
276 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | 282 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
277 | kvmppc_mmu_flush_segments(vcpu); | 283 | kvmppc_mmu_flush_segments(vcpu); |
278 | } | 284 | } |
279 | map->host_vsid = vcpu_book3s->vsid_next; | 285 | map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next]; |
280 | 286 | vcpu_book3s->vsid_next++; | |
281 | /* Would have to be 111 to be completely aligned with the rest of | ||
282 | Linux, but that is just way too little space! */ | ||
283 | vcpu_book3s->vsid_next+=1; | ||
284 | 287 | ||
285 | map->guest_vsid = gvsid; | 288 | map->guest_vsid = gvsid; |
286 | map->valid = true; | 289 | map->valid = true; |
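With the vsid_pool in place, allocation degenerates to a bump index over precomputed host VSIDs, with a wholesale flush when the pool wraps. A sketch of just that allocation policy, with VSID_POOL_SIZE inferred from the init loop further down (SID_CONTEXTS groups of 16), the SID_CONTEXTS value assumed, and the flush callback standing in for the pte/segment flushes above:

#define SID_CONTEXTS    15                      /* assumed value, for illustration */
#define VSID_POOL_SIZE  (SID_CONTEXTS * 16)     /* matches the (i * 16) + j fill */

struct vsid_pool {
        int next;
        unsigned long vsid[VSID_POOL_SIZE];
};

/* Hand out the next pooled host VSID; on exhaustion, reset and let the
 * caller-provided flush invalidate all shadow state, as create_sid_map()
 * does with kvmppc_mmu_pte_flush()/kvmppc_mmu_flush_segments(). */
static unsigned long vsid_alloc(struct vsid_pool *p, void (*flush_all)(void))
{
        if (p->next >= VSID_POOL_SIZE) {
                p->next = 0;
                flush_all();
        }
        return p->vsid[p->next++];
}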
@@ -327,40 +330,38 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) | |||
327 | 330 | ||
328 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | 331 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) |
329 | { | 332 | { |
333 | int i; | ||
334 | |||
330 | kvmppc_mmu_hpte_destroy(vcpu); | 335 | kvmppc_mmu_hpte_destroy(vcpu); |
331 | preempt_disable(); | 336 | preempt_disable(); |
332 | __destroy_context(to_book3s(vcpu)->context_id); | 337 | for (i = 0; i < SID_CONTEXTS; i++) |
338 | __destroy_context(to_book3s(vcpu)->context_id[i]); | ||
333 | preempt_enable(); | 339 | preempt_enable(); |
334 | } | 340 | } |
335 | 341 | ||
336 | /* From mm/mmu_context_hash32.c */ | 342 | /* From mm/mmu_context_hash32.c */ |
337 | #define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff) | 343 | #define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff) |
338 | 344 | ||
339 | int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | 345 | int kvmppc_mmu_init(struct kvm_vcpu *vcpu) |
340 | { | 346 | { |
341 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 347 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
342 | int err; | 348 | int err; |
343 | ulong sdr1; | 349 | ulong sdr1; |
350 | int i; | ||
351 | int j; | ||
344 | 352 | ||
345 | err = __init_new_context(); | 353 | for (i = 0; i < SID_CONTEXTS; i++) { |
346 | if (err < 0) | 354 | err = __init_new_context(); |
347 | return -1; | 355 | if (err < 0) |
348 | vcpu3s->context_id = err; | 356 | goto init_fail; |
349 | 357 | vcpu3s->context_id[i] = err; | |
350 | vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1; | ||
351 | vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id); | ||
352 | |||
353 | #if 0 /* XXX still doesn't guarantee uniqueness */ | ||
354 | /* We could collide with the Linux vsid space because the vsid | ||
355 | * wraps around at 24 bits. We're safe if we do our own space | ||
356 | * though, so let's always set the highest bit. */ | ||
357 | 358 | ||
358 | vcpu3s->vsid_max |= 0x00800000; | 359 | /* Remember context id for this combination */ |
359 | vcpu3s->vsid_first |= 0x00800000; | 360 | for (j = 0; j < 16; j++) |
360 | #endif | 361 | vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j); |
361 | BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first); | 362 | } |
362 | 363 | ||
363 | vcpu3s->vsid_next = vcpu3s->vsid_first; | 364 | vcpu3s->vsid_next = 0; |
364 | 365 | ||
365 | /* Remember where the HTAB is */ | 366 | /* Remember where the HTAB is */ |
366 | asm ( "mfsdr1 %0" : "=r"(sdr1) ); | 367 | asm ( "mfsdr1 %0" : "=r"(sdr1) ); |
@@ -370,4 +371,14 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | |||
370 | kvmppc_mmu_hpte_init(vcpu); | 371 | kvmppc_mmu_hpte_init(vcpu); |
371 | 372 | ||
372 | return 0; | 373 | return 0; |
374 | |||
375 | init_fail: | ||
376 | for (j = 0; j < i; j++) { | ||
377 | if (!vcpu3s->context_id[j]) | ||
378 | continue; | ||
379 | |||
380 | __destroy_context(to_book3s(vcpu)->context_id[j]); | ||
381 | } | ||
382 | |||
383 | return -1; | ||
373 | } | 384 | } |
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 4025ea26b3c1..d7889ef3211e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c | |||
@@ -163,6 +163,22 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
163 | bool found = false; | 163 | bool found = false; |
164 | bool perm_err = false; | 164 | bool perm_err = false; |
165 | int second = 0; | 165 | int second = 0; |
166 | ulong mp_ea = vcpu->arch.magic_page_ea; | ||
167 | |||
168 | /* Magic page override */ | ||
169 | if (unlikely(mp_ea) && | ||
170 | unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && | ||
171 | !(vcpu->arch.shared->msr & MSR_PR)) { | ||
172 | gpte->eaddr = eaddr; | ||
173 | gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); | ||
174 | gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); | ||
175 | gpte->raddr &= KVM_PAM; | ||
176 | gpte->may_execute = true; | ||
177 | gpte->may_read = true; | ||
178 | gpte->may_write = true; | ||
179 | |||
180 | return 0; | ||
181 | } | ||
166 | 182 | ||
167 | slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr); | 183 | slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr); |
168 | if (!slbe) | 184 | if (!slbe) |
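The magic-page override bypasses the shadow-MMU walk whenever a privileged access targets the paravirt magic page. The predicate reduces to a page-granular address compare plus a privilege check; a minimal sketch (MSR_PR is the problem-state bit, 1 << 14 as in asm/reg.h):

#define MSR_PR  (1UL << 14)     /* problem state (user mode) */

/* True when the access must be served from the magic page: the magic
 * EA is configured, eaddr falls in the same 4k page, and the guest is
 * in supervisor mode. Mirrors the condition in the hunk above. */
static int is_magic_page_access(unsigned long eaddr, unsigned long mp_ea,
                                unsigned long msr)
{
        return mp_ea &&
               ((eaddr & ~0xfffUL) == (mp_ea & ~0xfffUL)) &&
               !(msr & MSR_PR);
}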
@@ -180,9 +196,9 @@ do_second: | |||
180 | goto no_page_found; | 196 | goto no_page_found; |
181 | } | 197 | } |
182 | 198 | ||
183 | if ((vcpu->arch.msr & MSR_PR) && slbe->Kp) | 199 | if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp) |
184 | key = 4; | 200 | key = 4; |
185 | else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks) | 201 | else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks) |
186 | key = 4; | 202 | key = 4; |
187 | 203 | ||
188 | for (i=0; i<16; i+=2) { | 204 | for (i=0; i<16; i+=2) { |
@@ -381,7 +397,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) | |||
381 | for (i = 1; i < vcpu_book3s->slb_nr; i++) | 397 | for (i = 1; i < vcpu_book3s->slb_nr; i++) |
382 | vcpu_book3s->slb[i].valid = false; | 398 | vcpu_book3s->slb[i].valid = false; |
383 | 399 | ||
384 | if (vcpu->arch.msr & MSR_IR) { | 400 | if (vcpu->arch.shared->msr & MSR_IR) { |
385 | kvmppc_mmu_flush_segments(vcpu); | 401 | kvmppc_mmu_flush_segments(vcpu); |
386 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); | 402 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); |
387 | } | 403 | } |
@@ -445,14 +461,15 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
445 | ulong ea = esid << SID_SHIFT; | 461 | ulong ea = esid << SID_SHIFT; |
446 | struct kvmppc_slb *slb; | 462 | struct kvmppc_slb *slb; |
447 | u64 gvsid = esid; | 463 | u64 gvsid = esid; |
464 | ulong mp_ea = vcpu->arch.magic_page_ea; | ||
448 | 465 | ||
449 | if (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | 466 | if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
450 | slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); | 467 | slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); |
451 | if (slb) | 468 | if (slb) |
452 | gvsid = slb->vsid; | 469 | gvsid = slb->vsid; |
453 | } | 470 | } |
454 | 471 | ||
455 | switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | 472 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
456 | case 0: | 473 | case 0: |
457 | *vsid = VSID_REAL | esid; | 474 | *vsid = VSID_REAL | esid; |
458 | break; | 475 | break; |
@@ -464,7 +481,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
464 | break; | 481 | break; |
465 | case MSR_DR|MSR_IR: | 482 | case MSR_DR|MSR_IR: |
466 | if (!slb) | 483 | if (!slb) |
467 | return -ENOENT; | 484 | goto no_slb; |
468 | 485 | ||
469 | *vsid = gvsid; | 486 | *vsid = gvsid; |
470 | break; | 487 | break; |
@@ -473,10 +490,21 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
473 | break; | 490 | break; |
474 | } | 491 | } |
475 | 492 | ||
476 | if (vcpu->arch.msr & MSR_PR) | 493 | if (vcpu->arch.shared->msr & MSR_PR) |
477 | *vsid |= VSID_PR; | 494 | *vsid |= VSID_PR; |
478 | 495 | ||
479 | return 0; | 496 | return 0; |
497 | |||
498 | no_slb: | ||
499 | /* Catch magic page case */ | ||
500 | if (unlikely(mp_ea) && | ||
501 | unlikely(esid == (mp_ea >> SID_SHIFT)) && | ||
502 | !(vcpu->arch.shared->msr & MSR_PR)) { | ||
503 | *vsid = VSID_REAL | esid; | ||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | return -EINVAL; | ||
480 | } | 508 | } |
481 | 509 | ||
482 | static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) | 510 | static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 384179a5002b..fa2f08434ba5 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c | |||
@@ -20,7 +20,6 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
23 | #include <linux/hash.h> | ||
24 | 23 | ||
25 | #include <asm/kvm_ppc.h> | 24 | #include <asm/kvm_ppc.h> |
26 | #include <asm/kvm_book3s.h> | 25 | #include <asm/kvm_book3s.h> |
@@ -28,24 +27,9 @@ | |||
28 | #include <asm/machdep.h> | 27 | #include <asm/machdep.h> |
29 | #include <asm/mmu_context.h> | 28 | #include <asm/mmu_context.h> |
30 | #include <asm/hw_irq.h> | 29 | #include <asm/hw_irq.h> |
30 | #include "trace.h" | ||
31 | 31 | ||
32 | #define PTE_SIZE 12 | 32 | #define PTE_SIZE 12 |
33 | #define VSID_ALL 0 | ||
34 | |||
35 | /* #define DEBUG_MMU */ | ||
36 | /* #define DEBUG_SLB */ | ||
37 | |||
38 | #ifdef DEBUG_MMU | ||
39 | #define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__) | ||
40 | #else | ||
41 | #define dprintk_mmu(a, ...) do { } while(0) | ||
42 | #endif | ||
43 | |||
44 | #ifdef DEBUG_SLB | ||
45 | #define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__) | ||
46 | #else | ||
47 | #define dprintk_slb(a, ...) do { } while(0) | ||
48 | #endif | ||
49 | 33 | ||
50 | void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | 34 | void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) |
51 | { | 35 | { |
@@ -58,34 +42,39 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | |||
58 | * a hash, so we don't waste cycles on looping */ | 42 | * a hash, so we don't waste cycles on looping */ |
59 | static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) | 43 | static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) |
60 | { | 44 | { |
61 | return hash_64(gvsid, SID_MAP_BITS); | 45 | return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^ |
46 | ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^ | ||
47 | ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^ | ||
48 | ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^ | ||
49 | ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^ | ||
50 | ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^ | ||
51 | ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^ | ||
52 | ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK)); | ||
62 | } | 53 | } |
63 | 54 | ||
55 | |||
64 | static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) | 56 | static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) |
65 | { | 57 | { |
66 | struct kvmppc_sid_map *map; | 58 | struct kvmppc_sid_map *map; |
67 | u16 sid_map_mask; | 59 | u16 sid_map_mask; |
68 | 60 | ||
69 | if (vcpu->arch.msr & MSR_PR) | 61 | if (vcpu->arch.shared->msr & MSR_PR) |
70 | gvsid |= VSID_PR; | 62 | gvsid |= VSID_PR; |
71 | 63 | ||
72 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); | 64 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); |
73 | map = &to_book3s(vcpu)->sid_map[sid_map_mask]; | 65 | map = &to_book3s(vcpu)->sid_map[sid_map_mask]; |
74 | if (map->guest_vsid == gvsid) { | 66 | if (map->valid && (map->guest_vsid == gvsid)) { |
75 | dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n", | 67 | trace_kvm_book3s_slb_found(gvsid, map->host_vsid); |
76 | gvsid, map->host_vsid); | ||
77 | return map; | 68 | return map; |
78 | } | 69 | } |
79 | 70 | ||
80 | map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; | 71 | map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; |
81 | if (map->guest_vsid == gvsid) { | 72 | if (map->valid && (map->guest_vsid == gvsid)) { |
82 | dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n", | 73 | trace_kvm_book3s_slb_found(gvsid, map->host_vsid); |
83 | gvsid, map->host_vsid); | ||
84 | return map; | 74 | return map; |
85 | } | 75 | } |
86 | 76 | ||
87 | dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n", | 77 | trace_kvm_book3s_slb_fail(sid_map_mask, gvsid); |
88 | sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid); | ||
89 | return NULL; | 78 | return NULL; |
90 | } | 79 | } |
91 | 80 | ||
@@ -101,18 +90,13 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) | |||
101 | struct kvmppc_sid_map *map; | 90 | struct kvmppc_sid_map *map; |
102 | 91 | ||
103 | /* Get host physical address for gpa */ | 92 | /* Get host physical address for gpa */ |
104 | hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); | 93 | hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); |
105 | if (kvm_is_error_hva(hpaddr)) { | 94 | if (is_error_pfn(hpaddr)) { |
106 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); | 95 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); |
107 | return -EINVAL; | 96 | return -EINVAL; |
108 | } | 97 | } |
109 | hpaddr <<= PAGE_SHIFT; | 98 | hpaddr <<= PAGE_SHIFT; |
110 | #if PAGE_SHIFT == 12 | 99 | hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK); |
111 | #elif PAGE_SHIFT == 16 | ||
112 | hpaddr |= orig_pte->raddr & 0xf000; | ||
113 | #else | ||
114 | #error Unknown page size | ||
115 | #endif | ||
116 | 100 | ||
117 | /* and write the mapping ea -> hpa into the pt */ | 101 | /* and write the mapping ea -> hpa into the pt */ |
118 | vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); | 102 | vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); |
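The single masked OR replaces the old per-PAGE_SHIFT #if ladder: ~0xfff & ~PAGE_MASK selects exactly the sub-host-page bits above the 4k HPTE granularity, which is empty for 4k host pages and bits 12..15 for 64k pages. A standalone check of the 64k case:

#include <stdio.h>

int main(void)
{
        unsigned long page_shift = 16;          /* 64k host pages, for illustration */
        unsigned long page_mask = ~((1UL << page_shift) - 1);
        unsigned long raddr = 0x12345678;       /* example guest real address */
        unsigned long hpaddr = 0xabcd0000;      /* example pfn << page_shift */

        /* Keeps bits 12..15 of raddr here; with page_shift == 12 the
         * mask is 0 and the OR is a no-op, covering both old #if arms. */
        hpaddr |= raddr & (~0xfffUL & ~page_mask);
        printf("hpaddr = 0x%lx\n", hpaddr);     /* prints 0xabcd5000 */
        return 0;
}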
@@ -161,10 +145,7 @@ map_again: | |||
161 | } else { | 145 | } else { |
162 | struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); | 146 | struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); |
163 | 147 | ||
164 | dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n", | 148 | trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte); |
165 | ((rflags & HPTE_R_PP) == 3) ? '-' : 'w', | ||
166 | (rflags & HPTE_R_N) ? '-' : 'x', | ||
167 | orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr); | ||
168 | 149 | ||
169 | /* The ppc_md code may give us a secondary entry even though we | 150 | /* The ppc_md code may give us a secondary entry even though we |
170 | asked for a primary. Fix up. */ | 151 | asked for a primary. Fix up. */ |
@@ -191,7 +172,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
191 | u16 sid_map_mask; | 172 | u16 sid_map_mask; |
192 | static int backwards_map = 0; | 173 | static int backwards_map = 0; |
193 | 174 | ||
194 | if (vcpu->arch.msr & MSR_PR) | 175 | if (vcpu->arch.shared->msr & MSR_PR) |
195 | gvsid |= VSID_PR; | 176 | gvsid |= VSID_PR; |
196 | 177 | ||
197 | /* We might get collisions that trap in preceding order, so let's | 178 | /* We might get collisions that trap in preceding order, so let's |
@@ -219,8 +200,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
219 | map->guest_vsid = gvsid; | 200 | map->guest_vsid = gvsid; |
220 | map->valid = true; | 201 | map->valid = true; |
221 | 202 | ||
222 | dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n", | 203 | trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid); |
223 | sid_map_mask, gvsid, map->host_vsid); | ||
224 | 204 | ||
225 | return map; | 205 | return map; |
226 | } | 206 | } |
@@ -292,7 +272,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) | |||
292 | to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; | 272 | to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; |
293 | to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; | 273 | to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; |
294 | 274 | ||
295 | dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); | 275 | trace_kvm_book3s_slbmte(slb_vsid, slb_esid); |
296 | 276 | ||
297 | return 0; | 277 | return 0; |
298 | } | 278 | } |
@@ -306,7 +286,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) | |||
306 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | 286 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) |
307 | { | 287 | { |
308 | kvmppc_mmu_hpte_destroy(vcpu); | 288 | kvmppc_mmu_hpte_destroy(vcpu); |
309 | __destroy_context(to_book3s(vcpu)->context_id); | 289 | __destroy_context(to_book3s(vcpu)->context_id[0]); |
310 | } | 290 | } |
311 | 291 | ||
312 | int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | 292 | int kvmppc_mmu_init(struct kvm_vcpu *vcpu) |
@@ -317,10 +297,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | |||
317 | err = __init_new_context(); | 297 | err = __init_new_context(); |
318 | if (err < 0) | 298 | if (err < 0) |
319 | return -1; | 299 | return -1; |
320 | vcpu3s->context_id = err; | 300 | vcpu3s->context_id[0] = err; |
321 | 301 | ||
322 | vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1; | 302 | vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1; |
323 | vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS; | 303 | vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; |
324 | vcpu3s->vsid_next = vcpu3s->vsid_first; | 304 | vcpu3s->vsid_next = vcpu3s->vsid_first; |
325 | 305 | ||
326 | kvmppc_mmu_hpte_init(vcpu); | 306 | kvmppc_mmu_hpte_init(vcpu); |
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index c85f906038ce..466846557089 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c | |||
@@ -73,8 +73,8 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
73 | switch (get_xop(inst)) { | 73 | switch (get_xop(inst)) { |
74 | case OP_19_XOP_RFID: | 74 | case OP_19_XOP_RFID: |
75 | case OP_19_XOP_RFI: | 75 | case OP_19_XOP_RFI: |
76 | kvmppc_set_pc(vcpu, vcpu->arch.srr0); | 76 | kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0); |
77 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | 77 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); |
78 | *advance = 0; | 78 | *advance = 0; |
79 | break; | 79 | break; |
80 | 80 | ||
@@ -86,14 +86,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
86 | case 31: | 86 | case 31: |
87 | switch (get_xop(inst)) { | 87 | switch (get_xop(inst)) { |
88 | case OP_31_XOP_MFMSR: | 88 | case OP_31_XOP_MFMSR: |
89 | kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr); | 89 | kvmppc_set_gpr(vcpu, get_rt(inst), |
90 | vcpu->arch.shared->msr); | ||
90 | break; | 91 | break; |
91 | case OP_31_XOP_MTMSRD: | 92 | case OP_31_XOP_MTMSRD: |
92 | { | 93 | { |
93 | ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); | 94 | ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); |
94 | if (inst & 0x10000) { | 95 | if (inst & 0x10000) { |
95 | vcpu->arch.msr &= ~(MSR_RI | MSR_EE); | 96 | vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE); |
96 | vcpu->arch.msr |= rs & (MSR_RI | MSR_EE); | 97 | vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE); |
97 | } else | 98 | } else |
98 | kvmppc_set_msr(vcpu, rs); | 99 | kvmppc_set_msr(vcpu, rs); |
99 | break; | 100 | break; |
@@ -204,14 +205,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
204 | ra = kvmppc_get_gpr(vcpu, get_ra(inst)); | 205 | ra = kvmppc_get_gpr(vcpu, get_ra(inst)); |
205 | 206 | ||
206 | addr = (ra + rb) & ~31ULL; | 207 | addr = (ra + rb) & ~31ULL; |
207 | if (!(vcpu->arch.msr & MSR_SF)) | 208 | if (!(vcpu->arch.shared->msr & MSR_SF)) |
208 | addr &= 0xffffffff; | 209 | addr &= 0xffffffff; |
209 | vaddr = addr; | 210 | vaddr = addr; |
210 | 211 | ||
211 | r = kvmppc_st(vcpu, &addr, 32, zeros, true); | 212 | r = kvmppc_st(vcpu, &addr, 32, zeros, true); |
212 | if ((r == -ENOENT) || (r == -EPERM)) { | 213 | if ((r == -ENOENT) || (r == -EPERM)) { |
213 | *advance = 0; | 214 | *advance = 0; |
214 | vcpu->arch.dear = vaddr; | 215 | vcpu->arch.shared->dar = vaddr; |
215 | to_svcpu(vcpu)->fault_dar = vaddr; | 216 | to_svcpu(vcpu)->fault_dar = vaddr; |
216 | 217 | ||
217 | dsisr = DSISR_ISSTORE; | 218 | dsisr = DSISR_ISSTORE; |
@@ -220,7 +221,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
220 | else if (r == -EPERM) | 221 | else if (r == -EPERM) |
221 | dsisr |= DSISR_PROTFAULT; | 222 | dsisr |= DSISR_PROTFAULT; |
222 | 223 | ||
223 | to_book3s(vcpu)->dsisr = dsisr; | 224 | vcpu->arch.shared->dsisr = dsisr; |
224 | to_svcpu(vcpu)->fault_dsisr = dsisr; | 225 | to_svcpu(vcpu)->fault_dsisr = dsisr; |
225 | 226 | ||
226 | kvmppc_book3s_queue_irqprio(vcpu, | 227 | kvmppc_book3s_queue_irqprio(vcpu, |
@@ -263,7 +264,7 @@ void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, | |||
263 | } | 264 | } |
264 | } | 265 | } |
265 | 266 | ||
266 | static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn) | 267 | static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn) |
267 | { | 268 | { |
268 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | 269 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
269 | struct kvmppc_bat *bat; | 270 | struct kvmppc_bat *bat; |
@@ -285,35 +286,7 @@ static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn) | |||
285 | BUG(); | 286 | BUG(); |
286 | } | 287 | } |
287 | 288 | ||
288 | if (sprn % 2) | 289 | return bat; |
289 | return bat->raw >> 32; | ||
290 | else | ||
291 | return bat->raw; | ||
292 | } | ||
293 | |||
294 | static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) | ||
295 | { | ||
296 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
297 | struct kvmppc_bat *bat; | ||
298 | |||
299 | switch (sprn) { | ||
300 | case SPRN_IBAT0U ... SPRN_IBAT3L: | ||
301 | bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; | ||
302 | break; | ||
303 | case SPRN_IBAT4U ... SPRN_IBAT7L: | ||
304 | bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)]; | ||
305 | break; | ||
306 | case SPRN_DBAT0U ... SPRN_DBAT3L: | ||
307 | bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; | ||
308 | break; | ||
309 | case SPRN_DBAT4U ... SPRN_DBAT7L: | ||
310 | bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)]; | ||
311 | break; | ||
312 | default: | ||
313 | BUG(); | ||
314 | } | ||
315 | |||
316 | kvmppc_set_bat(vcpu, bat, !(sprn % 2), val); | ||
317 | } | 290 | } |
318 | 291 | ||
319 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 292 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) |
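Collapsing the two BAT helpers into kvmppc_find_bat() works because the SPR encoding already carries all the routing information: BAT SPRs come in upper/lower pairs, so the pair index is (sprn - base) / 2 and the parity selects the word. A small decode demo using the architected SPR numbers (IBAT0U..IBAT3L are 528..535, DBAT0U..DBAT3L are 536..543; the 4..7 ranges decode the same way at their own bases):

#include <stdio.h>

#define SPRN_IBAT0U 528
#define SPRN_DBAT0U 536

int main(void)
{
        int sprn = 539;         /* DBAT1L, as an example */
        int is_dbat = sprn >= SPRN_DBAT0U;
        int base = is_dbat ? SPRN_DBAT0U : SPRN_IBAT0U;

        printf("%cBAT%d %s\n",
               is_dbat ? 'D' : 'I',
               (sprn - base) / 2,                       /* pair index */
               (sprn % 2) ? "lower" : "upper");         /* odd = lower half */
        return 0;
}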
@@ -326,10 +299,10 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
326 | to_book3s(vcpu)->sdr1 = spr_val; | 299 | to_book3s(vcpu)->sdr1 = spr_val; |
327 | break; | 300 | break; |
328 | case SPRN_DSISR: | 301 | case SPRN_DSISR: |
329 | to_book3s(vcpu)->dsisr = spr_val; | 302 | vcpu->arch.shared->dsisr = spr_val; |
330 | break; | 303 | break; |
331 | case SPRN_DAR: | 304 | case SPRN_DAR: |
332 | vcpu->arch.dear = spr_val; | 305 | vcpu->arch.shared->dar = spr_val; |
333 | break; | 306 | break; |
334 | case SPRN_HIOR: | 307 | case SPRN_HIOR: |
335 | to_book3s(vcpu)->hior = spr_val; | 308 | to_book3s(vcpu)->hior = spr_val; |
@@ -338,12 +311,16 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
338 | case SPRN_IBAT4U ... SPRN_IBAT7L: | 311 | case SPRN_IBAT4U ... SPRN_IBAT7L: |
339 | case SPRN_DBAT0U ... SPRN_DBAT3L: | 312 | case SPRN_DBAT0U ... SPRN_DBAT3L: |
340 | case SPRN_DBAT4U ... SPRN_DBAT7L: | 313 | case SPRN_DBAT4U ... SPRN_DBAT7L: |
341 | kvmppc_write_bat(vcpu, sprn, (u32)spr_val); | 314 | { |
315 | struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); | ||
316 | |||
317 | kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val); | ||
342 | /* BAT writes happen so rarely that we're ok to flush | 318 | /* BAT writes happen so rarely that we're ok to flush |
343 | * everything here */ | 319 | * everything here */ |
344 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | 320 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
345 | kvmppc_mmu_flush_segments(vcpu); | 321 | kvmppc_mmu_flush_segments(vcpu); |
346 | break; | 322 | break; |
323 | } | ||
347 | case SPRN_HID0: | 324 | case SPRN_HID0: |
348 | to_book3s(vcpu)->hid[0] = spr_val; | 325 | to_book3s(vcpu)->hid[0] = spr_val; |
349 | break; | 326 | break; |
@@ -433,16 +410,24 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
433 | case SPRN_IBAT4U ... SPRN_IBAT7L: | 410 | case SPRN_IBAT4U ... SPRN_IBAT7L: |
434 | case SPRN_DBAT0U ... SPRN_DBAT3L: | 411 | case SPRN_DBAT0U ... SPRN_DBAT3L: |
435 | case SPRN_DBAT4U ... SPRN_DBAT7L: | 412 | case SPRN_DBAT4U ... SPRN_DBAT7L: |
436 | kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn)); | 413 | { |
414 | struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); | ||
415 | |||
416 | if (sprn % 2) | ||
417 | kvmppc_set_gpr(vcpu, rt, bat->raw >> 32); | ||
418 | else | ||
419 | kvmppc_set_gpr(vcpu, rt, bat->raw); | ||
420 | |||
437 | break; | 421 | break; |
422 | } | ||
438 | case SPRN_SDR1: | 423 | case SPRN_SDR1: |
439 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); | 424 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); |
440 | break; | 425 | break; |
441 | case SPRN_DSISR: | 426 | case SPRN_DSISR: |
442 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr); | 427 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr); |
443 | break; | 428 | break; |
444 | case SPRN_DAR: | 429 | case SPRN_DAR: |
445 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); | 430 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); |
446 | break; | 431 | break; |
447 | case SPRN_HIOR: | 432 | case SPRN_HIOR: |
448 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); | 433 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); |
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c index 4868d4a7ebc5..79751d8dd131 100644 --- a/arch/powerpc/kvm/book3s_mmu_hpte.c +++ b/arch/powerpc/kvm/book3s_mmu_hpte.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kvm_host.h> | 21 | #include <linux/kvm_host.h> |
22 | #include <linux/hash.h> | 22 | #include <linux/hash.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include "trace.h" | ||
24 | 25 | ||
25 | #include <asm/kvm_ppc.h> | 26 | #include <asm/kvm_ppc.h> |
26 | #include <asm/kvm_book3s.h> | 27 | #include <asm/kvm_book3s.h> |
@@ -30,14 +31,6 @@ | |||
30 | 31 | ||
31 | #define PTE_SIZE 12 | 32 | #define PTE_SIZE 12 |
32 | 33 | ||
33 | /* #define DEBUG_MMU */ | ||
34 | |||
35 | #ifdef DEBUG_MMU | ||
36 | #define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__) | ||
37 | #else | ||
38 | #define dprintk_mmu(a, ...) do { } while(0) | ||
39 | #endif | ||
40 | |||
41 | static struct kmem_cache *hpte_cache; | 34 | static struct kmem_cache *hpte_cache; |
42 | 35 | ||
43 | static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) | 36 | static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) |
@@ -45,6 +38,12 @@ static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) | |||
45 | return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE); | 38 | return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE); |
46 | } | 39 | } |
47 | 40 | ||
41 | static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr) | ||
42 | { | ||
43 | return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE, | ||
44 | HPTEG_HASH_BITS_PTE_LONG); | ||
45 | } | ||
46 | |||
48 | static inline u64 kvmppc_mmu_hash_vpte(u64 vpage) | 47 | static inline u64 kvmppc_mmu_hash_vpte(u64 vpage) |
49 | { | 48 | { |
50 | return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE); | 49 | return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE); |
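The new pte_long hash keys only on effective-address bits 12..27 (the 0x0ffff000 window), so every translation that a 32-bit segment-less flush can match lands on one chain. A toy illustration of that grouping, ignoring the hash_64() mixing:

#include <stdio.h>

/* Two EAs differing only above bit 27 (their segment) share a key,
 * which is exactly what a 0x0ffff000-masked flush needs to find. */
static unsigned long pte_long_key(unsigned long eaddr)
{
        return (eaddr & 0x0ffff000UL) >> 12;
}

int main(void)
{
        printf("0x%lx 0x%lx\n",
               pte_long_key(0x10012000UL),
               pte_long_key(0x20012000UL));     /* both print 0x12 */
        return 0;
}

The payoff shows up below, where kvmppc_mmu_pte_flush_long() replaces the old loop over all 16 possible segments.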
@@ -60,77 +59,128 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | |||
60 | { | 59 | { |
61 | u64 index; | 60 | u64 index; |
62 | 61 | ||
62 | trace_kvm_book3s_mmu_map(pte); | ||
63 | |||
64 | spin_lock(&vcpu->arch.mmu_lock); | ||
65 | |||
63 | /* Add to ePTE list */ | 66 | /* Add to ePTE list */ |
64 | index = kvmppc_mmu_hash_pte(pte->pte.eaddr); | 67 | index = kvmppc_mmu_hash_pte(pte->pte.eaddr); |
65 | hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]); | 68 | hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]); |
69 | |||
70 | /* Add to ePTE_long list */ | ||
71 | index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr); | ||
72 | hlist_add_head_rcu(&pte->list_pte_long, | ||
73 | &vcpu->arch.hpte_hash_pte_long[index]); | ||
66 | 74 | ||
67 | /* Add to vPTE list */ | 75 | /* Add to vPTE list */ |
68 | index = kvmppc_mmu_hash_vpte(pte->pte.vpage); | 76 | index = kvmppc_mmu_hash_vpte(pte->pte.vpage); |
69 | hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]); | 77 | hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]); |
70 | 78 | ||
71 | /* Add to vPTE_long list */ | 79 | /* Add to vPTE_long list */ |
72 | index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage); | 80 | index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage); |
73 | hlist_add_head(&pte->list_vpte_long, | 81 | hlist_add_head_rcu(&pte->list_vpte_long, |
74 | &vcpu->arch.hpte_hash_vpte_long[index]); | 82 | &vcpu->arch.hpte_hash_vpte_long[index]); |
83 | |||
84 | spin_unlock(&vcpu->arch.mmu_lock); | ||
85 | } | ||
86 | |||
87 | static void free_pte_rcu(struct rcu_head *head) | ||
88 | { | ||
89 | struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head); | ||
90 | kmem_cache_free(hpte_cache, pte); | ||
75 | } | 91 | } |
76 | 92 | ||
77 | static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | 93 | static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) |
78 | { | 94 | { |
79 | dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n", | 95 | trace_kvm_book3s_mmu_invalidate(pte); |
80 | pte->pte.eaddr, pte->pte.vpage, pte->host_va); | ||
81 | 96 | ||
82 | /* Different for 32 and 64 bit */ | 97 | /* Different for 32 and 64 bit */ |
83 | kvmppc_mmu_invalidate_pte(vcpu, pte); | 98 | kvmppc_mmu_invalidate_pte(vcpu, pte); |
84 | 99 | ||
100 | spin_lock(&vcpu->arch.mmu_lock); | ||
101 | |||
102 | /* pte already invalidated in between? */ | ||
103 | if (hlist_unhashed(&pte->list_pte)) { | ||
104 | spin_unlock(&vcpu->arch.mmu_lock); | ||
105 | return; | ||
106 | } | ||
107 | |||
108 | hlist_del_init_rcu(&pte->list_pte); | ||
109 | hlist_del_init_rcu(&pte->list_pte_long); | ||
110 | hlist_del_init_rcu(&pte->list_vpte); | ||
111 | hlist_del_init_rcu(&pte->list_vpte_long); | ||
112 | |||
85 | if (pte->pte.may_write) | 113 | if (pte->pte.may_write) |
86 | kvm_release_pfn_dirty(pte->pfn); | 114 | kvm_release_pfn_dirty(pte->pfn); |
87 | else | 115 | else |
88 | kvm_release_pfn_clean(pte->pfn); | 116 | kvm_release_pfn_clean(pte->pfn); |
89 | 117 | ||
90 | hlist_del(&pte->list_pte); | 118 | spin_unlock(&vcpu->arch.mmu_lock); |
91 | hlist_del(&pte->list_vpte); | ||
92 | hlist_del(&pte->list_vpte_long); | ||
93 | 119 | ||
94 | vcpu->arch.hpte_cache_count--; | 120 | vcpu->arch.hpte_cache_count--; |
95 | kmem_cache_free(hpte_cache, pte); | 121 | call_rcu(&pte->rcu_head, free_pte_rcu); |
96 | } | 122 | } |
97 | 123 | ||
98 | static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) | 124 | static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) |
99 | { | 125 | { |
100 | struct hpte_cache *pte; | 126 | struct hpte_cache *pte; |
101 | struct hlist_node *node, *tmp; | 127 | struct hlist_node *node; |
102 | int i; | 128 | int i; |
103 | 129 | ||
130 | rcu_read_lock(); | ||
131 | |||
104 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { | 132 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { |
105 | struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; | 133 | struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; |
106 | 134 | ||
107 | hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long) | 135 | hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) |
108 | invalidate_pte(vcpu, pte); | 136 | invalidate_pte(vcpu, pte); |
109 | } | 137 | } |
138 | |||
139 | rcu_read_unlock(); | ||
110 | } | 140 | } |
111 | 141 | ||
112 | static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) | 142 | static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) |
113 | { | 143 | { |
114 | struct hlist_head *list; | 144 | struct hlist_head *list; |
115 | struct hlist_node *node, *tmp; | 145 | struct hlist_node *node; |
116 | struct hpte_cache *pte; | 146 | struct hpte_cache *pte; |
117 | 147 | ||
118 | /* Find the list of entries in the map */ | 148 | /* Find the list of entries in the map */ |
119 | list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)]; | 149 | list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)]; |
120 | 150 | ||
151 | rcu_read_lock(); | ||
152 | |||
121 | /* Check the list for matching entries and invalidate */ | 153 | /* Check the list for matching entries and invalidate */ |
122 | hlist_for_each_entry_safe(pte, node, tmp, list, list_pte) | 154 | hlist_for_each_entry_rcu(pte, node, list, list_pte) |
123 | if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) | 155 | if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) |
124 | invalidate_pte(vcpu, pte); | 156 | invalidate_pte(vcpu, pte); |
157 | |||
158 | rcu_read_unlock(); | ||
125 | } | 159 | } |
126 | 160 | ||
127 | void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) | 161 | static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) |
128 | { | 162 | { |
129 | u64 i; | 163 | struct hlist_head *list; |
164 | struct hlist_node *node; | ||
165 | struct hpte_cache *pte; | ||
130 | 166 | ||
131 | dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n", | 167 | /* Find the list of entries in the map */ |
132 | vcpu->arch.hpte_cache_count, guest_ea, ea_mask); | 168 | list = &vcpu->arch.hpte_hash_pte_long[ |
169 | kvmppc_mmu_hash_pte_long(guest_ea)]; | ||
133 | 170 | ||
171 | rcu_read_lock(); | ||
172 | |||
173 | /* Check the list for matching entries and invalidate */ | ||
174 | hlist_for_each_entry_rcu(pte, node, list, list_pte_long) | ||
175 | if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea) | ||
176 | invalidate_pte(vcpu, pte); | ||
177 | |||
178 | rcu_read_unlock(); | ||
179 | } | ||
180 | |||
181 | void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) | ||
182 | { | ||
183 | trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask); | ||
134 | guest_ea &= ea_mask; | 184 | guest_ea &= ea_mask; |
135 | 185 | ||
136 | switch (ea_mask) { | 186 | switch (ea_mask) { |
@@ -138,9 +188,7 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) | |||
138 | kvmppc_mmu_pte_flush_page(vcpu, guest_ea); | 188 | kvmppc_mmu_pte_flush_page(vcpu, guest_ea); |
139 | break; | 189 | break; |
140 | case 0x0ffff000: | 190 | case 0x0ffff000: |
141 | /* 32-bit flush w/o segment, go through all possible segments */ | 191 | kvmppc_mmu_pte_flush_long(vcpu, guest_ea); |
142 | for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL) | ||
143 | kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL); | ||
144 | break; | 192 | break; |
145 | case 0: | 193 | case 0: |
146 | /* Doing a complete flush -> start from scratch */ | 194 | /* Doing a complete flush -> start from scratch */ |
@@ -156,39 +204,46 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) | |||
156 | static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) | 204 | static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) |
157 | { | 205 | { |
158 | struct hlist_head *list; | 206 | struct hlist_head *list; |
159 | struct hlist_node *node, *tmp; | 207 | struct hlist_node *node; |
160 | struct hpte_cache *pte; | 208 | struct hpte_cache *pte; |
161 | u64 vp_mask = 0xfffffffffULL; | 209 | u64 vp_mask = 0xfffffffffULL; |
162 | 210 | ||
163 | list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)]; | 211 | list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)]; |
164 | 212 | ||
213 | rcu_read_lock(); | ||
214 | |||
165 | /* Check the list for matching entries and invalidate */ | 215 | /* Check the list for matching entries and invalidate */ |
166 | hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte) | 216 | hlist_for_each_entry_rcu(pte, node, list, list_vpte) |
167 | if ((pte->pte.vpage & vp_mask) == guest_vp) | 217 | if ((pte->pte.vpage & vp_mask) == guest_vp) |
168 | invalidate_pte(vcpu, pte); | 218 | invalidate_pte(vcpu, pte); |
219 | |||
220 | rcu_read_unlock(); | ||
169 | } | 221 | } |
170 | 222 | ||
171 | /* Flush with mask 0xffffff000 */ | 223 | /* Flush with mask 0xffffff000 */ |
172 | static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) | 224 | static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) |
173 | { | 225 | { |
174 | struct hlist_head *list; | 226 | struct hlist_head *list; |
175 | struct hlist_node *node, *tmp; | 227 | struct hlist_node *node; |
176 | struct hpte_cache *pte; | 228 | struct hpte_cache *pte; |
177 | u64 vp_mask = 0xffffff000ULL; | 229 | u64 vp_mask = 0xffffff000ULL; |
178 | 230 | ||
179 | list = &vcpu->arch.hpte_hash_vpte_long[ | 231 | list = &vcpu->arch.hpte_hash_vpte_long[ |
180 | kvmppc_mmu_hash_vpte_long(guest_vp)]; | 232 | kvmppc_mmu_hash_vpte_long(guest_vp)]; |
181 | 233 | ||
234 | rcu_read_lock(); | ||
235 | |||
182 | /* Check the list for matching entries and invalidate */ | 236 | /* Check the list for matching entries and invalidate */ |
183 | hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long) | 237 | hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) |
184 | if ((pte->pte.vpage & vp_mask) == guest_vp) | 238 | if ((pte->pte.vpage & vp_mask) == guest_vp) |
185 | invalidate_pte(vcpu, pte); | 239 | invalidate_pte(vcpu, pte); |
240 | |||
241 | rcu_read_unlock(); | ||
186 | } | 242 | } |
187 | 243 | ||
188 | void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) | 244 | void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) |
189 | { | 245 | { |
190 | dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n", | 246 | trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask); |
191 | vcpu->arch.hpte_cache_count, guest_vp, vp_mask); | ||
192 | guest_vp &= vp_mask; | 247 | guest_vp &= vp_mask; |
193 | 248 | ||
194 | switch(vp_mask) { | 249 | switch(vp_mask) { |
@@ -206,21 +261,24 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) | |||
206 | 261 | ||
207 | void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) | 262 | void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) |
208 | { | 263 | { |
209 | struct hlist_node *node, *tmp; | 264 | struct hlist_node *node; |
210 | struct hpte_cache *pte; | 265 | struct hpte_cache *pte; |
211 | int i; | 266 | int i; |
212 | 267 | ||
213 | dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n", | 268 | trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end); |
214 | vcpu->arch.hpte_cache_count, pa_start, pa_end); | 269 | |
270 | rcu_read_lock(); | ||
215 | 271 | ||
216 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { | 272 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { |
217 | struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; | 273 | struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; |
218 | 274 | ||
219 | hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long) | 275 | hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) |
220 | if ((pte->pte.raddr >= pa_start) && | 276 | if ((pte->pte.raddr >= pa_start) && |
221 | (pte->pte.raddr < pa_end)) | 277 | (pte->pte.raddr < pa_end)) |
222 | invalidate_pte(vcpu, pte); | 278 | invalidate_pte(vcpu, pte); |
223 | } | 279 | } |
280 | |||
281 | rcu_read_unlock(); | ||
224 | } | 282 | } |
225 | 283 | ||
226 | struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) | 284 | struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) |
@@ -254,11 +312,15 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu) | |||
254 | /* init hpte lookup hashes */ | 312 | /* init hpte lookup hashes */ |
255 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte, | 313 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte, |
256 | ARRAY_SIZE(vcpu->arch.hpte_hash_pte)); | 314 | ARRAY_SIZE(vcpu->arch.hpte_hash_pte)); |
315 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long, | ||
316 | ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long)); | ||
257 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte, | 317 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte, |
258 | ARRAY_SIZE(vcpu->arch.hpte_hash_vpte)); | 318 | ARRAY_SIZE(vcpu->arch.hpte_hash_vpte)); |
259 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long, | 319 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long, |
260 | ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long)); | 320 | ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long)); |
261 | 321 | ||
322 | spin_lock_init(&vcpu->arch.mmu_lock); | ||
323 | |||
262 | return 0; | 324 | return 0; |
263 | } | 325 | } |
264 | 326 | ||
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index 474f2e24050a..7b0ee96c1bed 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c | |||
@@ -159,20 +159,21 @@ | |||
159 | 159 | ||
160 | static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) | 160 | static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) |
161 | { | 161 | { |
162 | kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt], &vcpu->arch.fpscr); | 162 | kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]); |
163 | } | 163 | } |
164 | 164 | ||
165 | static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) | 165 | static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) |
166 | { | 166 | { |
167 | u64 dsisr; | 167 | u64 dsisr; |
168 | struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; | ||
168 | 169 | ||
169 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0); | 170 | shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0); |
170 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0); | 171 | shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0); |
171 | vcpu->arch.dear = eaddr; | 172 | shared->dar = eaddr; |
172 | /* Page Fault */ | 173 | /* Page Fault */ |
173 | dsisr = kvmppc_set_field(0, 33, 33, 1); | 174 | dsisr = kvmppc_set_field(0, 33, 33, 1); |
174 | if (is_store) | 175 | if (is_store) |
175 | to_book3s(vcpu)->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); | 176 | shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); |
176 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); | 177 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); |
177 | } | 178 | } |
178 | 179 | ||
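kvmppc_inject_pf() builds MSR and DSISR values with kvmppc_set_field(), which operates in IBM bit numbering over a 64-bit doubleword (bit 0 is the MSB). Under that convention bit 33 is 0x40000000 (DSISR_NOHPTE, "page fault") and bit 38 is 0x02000000 (DSISR_ISSTORE). A standalone mirror of the assumed helper semantics, checking those two values:

#include <stdint.h>
#include <stdio.h>

/* IBM-numbered field insert: msb..lsb count from the most significant
 * bit of a 64-bit doubleword. Assumed to mirror kvmppc_set_field(). */
static uint64_t set_field(uint64_t v, int msb, int lsb, uint64_t value)
{
        uint64_t mask = ((1ULL << (lsb - msb + 1)) - 1) << (63 - lsb);

        return (v & ~mask) | ((value << (63 - lsb)) & mask);
}

int main(void)
{
        uint64_t dsisr = set_field(0, 33, 33, 1);       /* page fault */

        dsisr = set_field(dsisr, 38, 38, 1);            /* store fault */
        printf("dsisr = 0x%llx\n",
               (unsigned long long)dsisr);              /* 0x42000000 */
        return 0;
}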
@@ -204,7 +205,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
204 | /* put in registers */ | 205 | /* put in registers */ |
205 | switch (ls_type) { | 206 | switch (ls_type) { |
206 | case FPU_LS_SINGLE: | 207 | case FPU_LS_SINGLE: |
207 | kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs], &vcpu->arch.fpscr); | 208 | kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]); |
208 | vcpu->arch.qpr[rs] = *((u32*)tmp); | 209 | vcpu->arch.qpr[rs] = *((u32*)tmp); |
209 | break; | 210 | break; |
210 | case FPU_LS_DOUBLE: | 211 | case FPU_LS_DOUBLE: |
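All the kvm_cvt_fd()/kvm_cvt_df() call sites in this file lose their fpscr argument; the helpers live in fpu.S and convert between the single-precision image a paired-single guest sees and the double-precision FPR storage. In plain C, and glossing over the FPSCR side effects the patch removes, the conversions amount to roughly this sketch (exact NaN/denormal behaviour is the hardware FPU's):

#include <stdint.h>
#include <string.h>

static void cvt_fd(const uint32_t *in, uint64_t *out)
{
        float f;
        double d;

        memcpy(&f, in, sizeof(f));
        d = f;                          /* widen single -> double (lfs/stfd style) */
        memcpy(out, &d, sizeof(d));
}

static void cvt_df(const uint64_t *in, uint32_t *out)
{
        double d;
        float f;

        memcpy(&d, in, sizeof(d));
        f = (float)d;                   /* round double -> single (lfd/stfs style) */
        memcpy(out, &f, sizeof(f));
}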
@@ -230,7 +231,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
230 | 231 | ||
231 | switch (ls_type) { | 232 | switch (ls_type) { |
232 | case FPU_LS_SINGLE: | 233 | case FPU_LS_SINGLE: |
233 | kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp, &vcpu->arch.fpscr); | 234 | kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp); |
234 | val = *((u32*)tmp); | 235 | val = *((u32*)tmp); |
235 | len = sizeof(u32); | 236 | len = sizeof(u32); |
236 | break; | 237 | break; |
@@ -296,7 +297,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
296 | emulated = EMULATE_DONE; | 297 | emulated = EMULATE_DONE; |
297 | 298 | ||
298 | /* put in registers */ | 299 | /* put in registers */ |
299 | kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs], &vcpu->arch.fpscr); | 300 | kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]); |
300 | vcpu->arch.qpr[rs] = tmp[1]; | 301 | vcpu->arch.qpr[rs] = tmp[1]; |
301 | 302 | ||
302 | dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], | 303 | dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], |
@@ -314,7 +315,7 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
314 | u32 tmp[2]; | 315 | u32 tmp[2]; |
315 | int len = w ? sizeof(u32) : sizeof(u64); | 316 | int len = w ? sizeof(u32) : sizeof(u64); |
316 | 317 | ||
317 | kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0], &vcpu->arch.fpscr); | 318 | kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]); |
318 | tmp[1] = vcpu->arch.qpr[rs]; | 319 | tmp[1] = vcpu->arch.qpr[rs]; |
319 | 320 | ||
320 | r = kvmppc_st(vcpu, &addr, len, tmp, true); | 321 | r = kvmppc_st(vcpu, &addr, len, tmp, true); |
@@ -516,9 +517,9 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, | |||
516 | WARN_ON(rc); | 517 | WARN_ON(rc); |
517 | 518 | ||
518 | /* PS0 */ | 519 | /* PS0 */ |
519 | kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr); | 520 | kvm_cvt_df(&fpr[reg_in1], &ps0_in1); |
520 | kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr); | 521 | kvm_cvt_df(&fpr[reg_in2], &ps0_in2); |
521 | kvm_cvt_df(&fpr[reg_in3], &ps0_in3, &vcpu->arch.fpscr); | 522 | kvm_cvt_df(&fpr[reg_in3], &ps0_in3); |
522 | 523 | ||
523 | if (scalar & SCALAR_LOW) | 524 | if (scalar & SCALAR_LOW) |
524 | ps0_in2 = qpr[reg_in2]; | 525 | ps0_in2 = qpr[reg_in2]; |
@@ -529,7 +530,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, | |||
529 | ps0_in1, ps0_in2, ps0_in3, ps0_out); | 530 | ps0_in1, ps0_in2, ps0_in3, ps0_out); |
530 | 531 | ||
531 | if (!(scalar & SCALAR_NO_PS0)) | 532 | if (!(scalar & SCALAR_NO_PS0)) |
532 | kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr); | 533 | kvm_cvt_fd(&ps0_out, &fpr[reg_out]); |
533 | 534 | ||
534 | /* PS1 */ | 535 | /* PS1 */ |
535 | ps1_in1 = qpr[reg_in1]; | 536 | ps1_in1 = qpr[reg_in1]; |
@@ -566,12 +567,12 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, | |||
566 | WARN_ON(rc); | 567 | WARN_ON(rc); |
567 | 568 | ||
568 | /* PS0 */ | 569 | /* PS0 */ |
569 | kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr); | 570 | kvm_cvt_df(&fpr[reg_in1], &ps0_in1); |
570 | 571 | ||
571 | if (scalar & SCALAR_LOW) | 572 | if (scalar & SCALAR_LOW) |
572 | ps0_in2 = qpr[reg_in2]; | 573 | ps0_in2 = qpr[reg_in2]; |
573 | else | 574 | else |
574 | kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr); | 575 | kvm_cvt_df(&fpr[reg_in2], &ps0_in2); |
575 | 576 | ||
576 | func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2); | 577 | func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2); |
577 | 578 | ||
@@ -579,7 +580,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, | |||
579 | dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", | 580 | dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", |
580 | ps0_in1, ps0_in2, ps0_out); | 581 | ps0_in1, ps0_in2, ps0_out); |
581 | 582 | ||
582 | kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr); | 583 | kvm_cvt_fd(&ps0_out, &fpr[reg_out]); |
583 | } | 584 | } |
584 | 585 | ||
585 | /* PS1 */ | 586 | /* PS1 */ |
@@ -615,13 +616,13 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, | |||
615 | WARN_ON(rc); | 616 | WARN_ON(rc); |
616 | 617 | ||
617 | /* PS0 */ | 618 | /* PS0 */ |
618 | kvm_cvt_df(&fpr[reg_in], &ps0_in, &vcpu->arch.fpscr); | 619 | kvm_cvt_df(&fpr[reg_in], &ps0_in); |
619 | func(&vcpu->arch.fpscr, &ps0_out, &ps0_in); | 620 | func(&vcpu->arch.fpscr, &ps0_out, &ps0_in); |
620 | 621 | ||
621 | dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", | 622 | dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", |
622 | ps0_in, ps0_out); | 623 | ps0_in, ps0_out); |
623 | 624 | ||
624 | kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr); | 625 | kvm_cvt_fd(&ps0_out, &fpr[reg_out]); |
625 | 626 | ||
626 | /* PS1 */ | 627 | /* PS1 */ |
627 | ps1_in = qpr[reg_in]; | 628 | ps1_in = qpr[reg_in]; |
@@ -658,7 +659,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
658 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) | 659 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) |
659 | return EMULATE_FAIL; | 660 | return EMULATE_FAIL; |
660 | 661 | ||
661 | if (!(vcpu->arch.msr & MSR_FP)) { | 662 | if (!(vcpu->arch.shared->msr & MSR_FP)) { |
662 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); | 663 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); |
663 | return EMULATE_AGAIN; | 664 | return EMULATE_AGAIN; |
664 | } | 665 | } |
@@ -671,7 +672,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
671 | #ifdef DEBUG | 672 | #ifdef DEBUG |
672 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { | 673 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { |
673 | u32 f; | 674 | u32 f; |
674 | kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr); | 675 | kvm_cvt_df(&vcpu->arch.fpr[i], &f); |
675 | dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", | 676 | dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", |
676 | i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); | 677 | i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); |
677 | } | 678 | } |
@@ -796,8 +797,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
796 | vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; | 797 | vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; |
797 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ | 798 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ |
798 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], | 799 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], |
799 | &vcpu->arch.qpr[ax_rd], | 800 | &vcpu->arch.qpr[ax_rd]); |
800 | &vcpu->arch.fpscr); | ||
801 | break; | 801 | break; |
802 | case OP_4X_PS_MERGE01: | 802 | case OP_4X_PS_MERGE01: |
803 | WARN_ON(rcomp); | 803 | WARN_ON(rcomp); |
@@ -808,19 +808,16 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
808 | WARN_ON(rcomp); | 808 | WARN_ON(rcomp); |
809 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ | 809 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ |
810 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], | 810 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], |
811 | &vcpu->arch.fpr[ax_rd], | 811 | &vcpu->arch.fpr[ax_rd]); |
812 | &vcpu->arch.fpscr); | ||
813 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ | 812 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ |
814 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], | 813 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], |
815 | &vcpu->arch.qpr[ax_rd], | 814 | &vcpu->arch.qpr[ax_rd]); |
816 | &vcpu->arch.fpscr); | ||
817 | break; | 815 | break; |
818 | case OP_4X_PS_MERGE11: | 816 | case OP_4X_PS_MERGE11: |
819 | WARN_ON(rcomp); | 817 | WARN_ON(rcomp); |
820 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ | 818 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ |
821 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], | 819 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], |
822 | &vcpu->arch.fpr[ax_rd], | 820 | &vcpu->arch.fpr[ax_rd]); |
823 | &vcpu->arch.fpscr); | ||
824 | vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; | 821 | vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; |
825 | break; | 822 | break; |
826 | } | 823 | } |
@@ -1255,7 +1252,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1255 | #ifdef DEBUG | 1252 | #ifdef DEBUG |
1256 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { | 1253 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { |
1257 | u32 f; | 1254 | u32 f; |
1258 | kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr); | 1255 | kvm_cvt_df(&vcpu->arch.fpr[i], &f); |
1259 | dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); | 1256 | dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); |
1260 | } | 1257 | } |
1261 | #endif | 1258 | #endif |
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 506d5c316c96..2b9c9088d00e 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
@@ -202,8 +202,25 @@ _GLOBAL(kvmppc_rmcall) | |||
202 | 202 | ||
203 | #if defined(CONFIG_PPC_BOOK3S_32) | 203 | #if defined(CONFIG_PPC_BOOK3S_32) |
204 | #define STACK_LR INT_FRAME_SIZE+4 | 204 | #define STACK_LR INT_FRAME_SIZE+4 |
205 | |||
206 | /* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */ | ||
207 | #define MSR_EXT_START \ | ||
208 | PPC_STL r20, _NIP(r1); \ | ||
209 | mfmsr r20; \ | ||
210 | LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \ | ||
211 | andc r3,r20,r3; /* Disable DR,EE */ \ | ||
212 | mtmsr r3; \ | ||
213 | sync | ||
214 | |||
215 | #define MSR_EXT_END \ | ||
216 | mtmsr r20; /* Enable DR,EE */ \ | ||
217 | sync; \ | ||
218 | PPC_LL r20, _NIP(r1) | ||
219 | |||
205 | #elif defined(CONFIG_PPC_BOOK3S_64) | 220 | #elif defined(CONFIG_PPC_BOOK3S_64) |
206 | #define STACK_LR _LINK | 221 | #define STACK_LR _LINK |
222 | #define MSR_EXT_START | ||
223 | #define MSR_EXT_END | ||
207 | #endif | 224 | #endif |
208 | 225 | ||
209 | /* | 226 | /* |
@@ -215,19 +232,12 @@ _GLOBAL(kvmppc_load_up_ ## what); \ | |||
215 | PPC_STLU r1, -INT_FRAME_SIZE(r1); \ | 232 | PPC_STLU r1, -INT_FRAME_SIZE(r1); \ |
216 | mflr r3; \ | 233 | mflr r3; \ |
217 | PPC_STL r3, STACK_LR(r1); \ | 234 | PPC_STL r3, STACK_LR(r1); \ |
218 | PPC_STL r20, _NIP(r1); \ | 235 | MSR_EXT_START; \ |
219 | mfmsr r20; \ | ||
220 | LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \ | ||
221 | andc r3,r20,r3; /* Disable DR,EE */ \ | ||
222 | mtmsr r3; \ | ||
223 | sync; \ | ||
224 | \ | 236 | \ |
225 | bl FUNC(load_up_ ## what); \ | 237 | bl FUNC(load_up_ ## what); \ |
226 | \ | 238 | \ |
227 | mtmsr r20; /* Enable DR,EE */ \ | 239 | MSR_EXT_END; \ |
228 | sync; \ | ||
229 | PPC_LL r3, STACK_LR(r1); \ | 240 | PPC_LL r3, STACK_LR(r1); \ |
230 | PPC_LL r20, _NIP(r1); \ | ||
231 | mtlr r3; \ | 241 | mtlr r3; \ |
232 | addi r1, r1, INT_FRAME_SIZE; \ | 242 | addi r1, r1, INT_FRAME_SIZE; \ |
233 | blr | 243 | blr |
@@ -242,10 +252,10 @@ define_load_up(vsx) | |||
242 | 252 | ||
243 | .global kvmppc_trampoline_lowmem | 253 | .global kvmppc_trampoline_lowmem |
244 | kvmppc_trampoline_lowmem: | 254 | kvmppc_trampoline_lowmem: |
245 | .long kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START | 255 | PPC_LONG kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START |
246 | 256 | ||
247 | .global kvmppc_trampoline_enter | 257 | .global kvmppc_trampoline_enter |
248 | kvmppc_trampoline_enter: | 258 | kvmppc_trampoline_enter: |
249 | .long kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START | 259 | PPC_LONG kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START |
250 | 260 | ||
251 | #include "book3s_segment.S" | 261 | #include "book3s_segment.S" |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 8d4e35f5372c..77575d08c818 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -62,9 +62,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | |||
62 | { | 62 | { |
63 | int i; | 63 | int i; |
64 | 64 | ||
65 | printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr); | 65 | printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); |
66 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); | 66 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); |
67 | printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1); | 67 | printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, |
68 | vcpu->arch.shared->srr1); | ||
68 | 69 | ||
69 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); | 70 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); |
70 | 71 | ||
@@ -130,13 +131,19 @@ void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) | |||
130 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | 131 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
131 | struct kvm_interrupt *irq) | 132 | struct kvm_interrupt *irq) |
132 | { | 133 | { |
133 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL); | 134 | unsigned int prio = BOOKE_IRQPRIO_EXTERNAL; |
135 | |||
136 | if (irq->irq == KVM_INTERRUPT_SET_LEVEL) | ||
137 | prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL; | ||
138 | |||
139 | kvmppc_booke_queue_irqprio(vcpu, prio); | ||
134 | } | 140 | } |
135 | 141 | ||
136 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, | 142 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, |
137 | struct kvm_interrupt *irq) | 143 | struct kvm_interrupt *irq) |
138 | { | 144 | { |
139 | clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); | 145 | clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); |
146 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); | ||
140 | } | 147 | } |
141 | 148 | ||
142 | /* Deliver the interrupt of the corresponding priority, if possible. */ | 149 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
@@ -146,6 +153,26 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
146 | int allowed = 0; | 153 | int allowed = 0; |
147 | ulong uninitialized_var(msr_mask); | 154 | ulong uninitialized_var(msr_mask); |
148 | bool update_esr = false, update_dear = false; | 155 | bool update_esr = false, update_dear = false; |
156 | ulong crit_raw = vcpu->arch.shared->critical; | ||
157 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); | ||
158 | bool crit; | ||
159 | bool keep_irq = false; | ||
160 | |||
161 | /* Truncate crit indicators in 32 bit mode */ | ||
162 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | ||
163 | crit_raw &= 0xffffffff; | ||
164 | crit_r1 &= 0xffffffff; | ||
165 | } | ||
166 | |||
167 | /* Critical section when crit == r1 */ | ||
168 | crit = (crit_raw == crit_r1); | ||
169 | /* ... and we're in supervisor mode */ | ||
170 | crit = crit && !(vcpu->arch.shared->msr & MSR_PR); | ||
171 | |||
172 | if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) { | ||
173 | priority = BOOKE_IRQPRIO_EXTERNAL; | ||
174 | keep_irq = true; | ||
175 | } | ||
149 | 176 | ||
150 | switch (priority) { | 177 | switch (priority) { |
151 | case BOOKE_IRQPRIO_DTLB_MISS: | 178 | case BOOKE_IRQPRIO_DTLB_MISS: |
@@ -169,36 +196,38 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
169 | break; | 196 | break; |
170 | case BOOKE_IRQPRIO_CRITICAL: | 197 | case BOOKE_IRQPRIO_CRITICAL: |
171 | case BOOKE_IRQPRIO_WATCHDOG: | 198 | case BOOKE_IRQPRIO_WATCHDOG: |
172 | allowed = vcpu->arch.msr & MSR_CE; | 199 | allowed = vcpu->arch.shared->msr & MSR_CE; |
173 | msr_mask = MSR_ME; | 200 | msr_mask = MSR_ME; |
174 | break; | 201 | break; |
175 | case BOOKE_IRQPRIO_MACHINE_CHECK: | 202 | case BOOKE_IRQPRIO_MACHINE_CHECK: |
176 | allowed = vcpu->arch.msr & MSR_ME; | 203 | allowed = vcpu->arch.shared->msr & MSR_ME; |
177 | msr_mask = 0; | 204 | msr_mask = 0; |
178 | break; | 205 | break; |
179 | case BOOKE_IRQPRIO_EXTERNAL: | 206 | case BOOKE_IRQPRIO_EXTERNAL: |
180 | case BOOKE_IRQPRIO_DECREMENTER: | 207 | case BOOKE_IRQPRIO_DECREMENTER: |
181 | case BOOKE_IRQPRIO_FIT: | 208 | case BOOKE_IRQPRIO_FIT: |
182 | allowed = vcpu->arch.msr & MSR_EE; | 209 | allowed = vcpu->arch.shared->msr & MSR_EE; |
210 | allowed = allowed && !crit; | ||
183 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 211 | msr_mask = MSR_CE|MSR_ME|MSR_DE; |
184 | break; | 212 | break; |
185 | case BOOKE_IRQPRIO_DEBUG: | 213 | case BOOKE_IRQPRIO_DEBUG: |
186 | allowed = vcpu->arch.msr & MSR_DE; | 214 | allowed = vcpu->arch.shared->msr & MSR_DE; |
187 | msr_mask = MSR_ME; | 215 | msr_mask = MSR_ME; |
188 | break; | 216 | break; |
189 | } | 217 | } |
190 | 218 | ||
191 | if (allowed) { | 219 | if (allowed) { |
192 | vcpu->arch.srr0 = vcpu->arch.pc; | 220 | vcpu->arch.shared->srr0 = vcpu->arch.pc; |
193 | vcpu->arch.srr1 = vcpu->arch.msr; | 221 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; |
194 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 222 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
195 | if (update_esr == true) | 223 | if (update_esr == true) |
196 | vcpu->arch.esr = vcpu->arch.queued_esr; | 224 | vcpu->arch.esr = vcpu->arch.queued_esr; |
197 | if (update_dear == true) | 225 | if (update_dear == true) |
198 | vcpu->arch.dear = vcpu->arch.queued_dear; | 226 | vcpu->arch.shared->dar = vcpu->arch.queued_dear; |
199 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); | 227 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); |
200 | 228 | ||
201 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 229 | if (!keep_irq) |
230 | clear_bit(priority, &vcpu->arch.pending_exceptions); | ||
202 | } | 231 | } |
203 | 232 | ||
204 | return allowed; | 233 | return allowed; |
@@ -208,6 +237,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
208 | void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) | 237 | void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) |
209 | { | 238 | { |
210 | unsigned long *pending = &vcpu->arch.pending_exceptions; | 239 | unsigned long *pending = &vcpu->arch.pending_exceptions; |
240 | unsigned long old_pending = vcpu->arch.pending_exceptions; | ||
211 | unsigned int priority; | 241 | unsigned int priority; |
212 | 242 | ||
213 | priority = __ffs(*pending); | 243 | priority = __ffs(*pending); |
@@ -219,6 +249,12 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) | |||
219 | BITS_PER_BYTE * sizeof(*pending), | 249 | BITS_PER_BYTE * sizeof(*pending), |
220 | priority + 1); | 250 | priority + 1); |
221 | } | 251 | } |
252 | |||
253 | /* Tell the guest about our interrupt status */ | ||
254 | if (*pending) | ||
255 | vcpu->arch.shared->int_pending = 1; | ||
256 | else if (old_pending) | ||
257 | vcpu->arch.shared->int_pending = 0; | ||
222 | } | 258 | } |
223 | 259 | ||
224 | /** | 260 | /** |
@@ -265,7 +301,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
265 | break; | 301 | break; |
266 | 302 | ||
267 | case BOOKE_INTERRUPT_PROGRAM: | 303 | case BOOKE_INTERRUPT_PROGRAM: |
268 | if (vcpu->arch.msr & MSR_PR) { | 304 | if (vcpu->arch.shared->msr & MSR_PR) { |
269 | /* Program traps generated by user-level software must be handled | 305 | /* Program traps generated by user-level software must be handled |
270 | * by the guest kernel. */ | 306 | * by the guest kernel. */ |
271 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); | 307 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
@@ -337,7 +373,15 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
337 | break; | 373 | break; |
338 | 374 | ||
339 | case BOOKE_INTERRUPT_SYSCALL: | 375 | case BOOKE_INTERRUPT_SYSCALL: |
340 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); | 376 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
377 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { | ||
378 | /* KVM PV hypercalls */ | ||
379 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | ||
380 | r = RESUME_GUEST; | ||
381 | } else { | ||
382 | /* Guest syscalls */ | ||
383 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); | ||
384 | } | ||
341 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); | 385 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); |
342 | r = RESUME_GUEST; | 386 | r = RESUME_GUEST; |
343 | break; | 387 | break; |
@@ -466,15 +510,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
466 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ | 510 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ |
467 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 511 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
468 | { | 512 | { |
513 | int i; | ||
514 | |||
469 | vcpu->arch.pc = 0; | 515 | vcpu->arch.pc = 0; |
470 | vcpu->arch.msr = 0; | 516 | vcpu->arch.shared->msr = 0; |
471 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ | 517 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
472 | 518 | ||
473 | vcpu->arch.shadow_pid = 1; | 519 | vcpu->arch.shadow_pid = 1; |
474 | 520 | ||
475 | /* Eye-catching number so we know if the guest takes an interrupt | 521 | /* Eye-catching numbers so we know if the guest takes an interrupt |
476 | * before it's programmed its own IVPR. */ | 522 | * before it's programmed its own IVPR/IVORs. */ |
477 | vcpu->arch.ivpr = 0x55550000; | 523 | vcpu->arch.ivpr = 0x55550000; |
524 | for (i = 0; i < BOOKE_IRQPRIO_MAX; i++) | ||
525 | vcpu->arch.ivor[i] = 0x7700 | i * 4; | ||
478 | 526 | ||
479 | kvmppc_init_timing_stats(vcpu); | 527 | kvmppc_init_timing_stats(vcpu); |
480 | 528 | ||
@@ -490,14 +538,14 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
490 | regs->ctr = vcpu->arch.ctr; | 538 | regs->ctr = vcpu->arch.ctr; |
491 | regs->lr = vcpu->arch.lr; | 539 | regs->lr = vcpu->arch.lr; |
492 | regs->xer = kvmppc_get_xer(vcpu); | 540 | regs->xer = kvmppc_get_xer(vcpu); |
493 | regs->msr = vcpu->arch.msr; | 541 | regs->msr = vcpu->arch.shared->msr; |
494 | regs->srr0 = vcpu->arch.srr0; | 542 | regs->srr0 = vcpu->arch.shared->srr0; |
495 | regs->srr1 = vcpu->arch.srr1; | 543 | regs->srr1 = vcpu->arch.shared->srr1; |
496 | regs->pid = vcpu->arch.pid; | 544 | regs->pid = vcpu->arch.pid; |
497 | regs->sprg0 = vcpu->arch.sprg0; | 545 | regs->sprg0 = vcpu->arch.shared->sprg0; |
498 | regs->sprg1 = vcpu->arch.sprg1; | 546 | regs->sprg1 = vcpu->arch.shared->sprg1; |
499 | regs->sprg2 = vcpu->arch.sprg2; | 547 | regs->sprg2 = vcpu->arch.shared->sprg2; |
500 | regs->sprg3 = vcpu->arch.sprg3; | 548 | regs->sprg3 = vcpu->arch.shared->sprg3; |
501 | regs->sprg5 = vcpu->arch.sprg4; | 549 | regs->sprg5 = vcpu->arch.sprg4; |
502 | regs->sprg6 = vcpu->arch.sprg5; | 550 | regs->sprg6 = vcpu->arch.sprg5; |
503 | regs->sprg7 = vcpu->arch.sprg6; | 551 | regs->sprg7 = vcpu->arch.sprg6; |
@@ -518,12 +566,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
518 | vcpu->arch.lr = regs->lr; | 566 | vcpu->arch.lr = regs->lr; |
519 | kvmppc_set_xer(vcpu, regs->xer); | 567 | kvmppc_set_xer(vcpu, regs->xer); |
520 | kvmppc_set_msr(vcpu, regs->msr); | 568 | kvmppc_set_msr(vcpu, regs->msr); |
521 | vcpu->arch.srr0 = regs->srr0; | 569 | vcpu->arch.shared->srr0 = regs->srr0; |
522 | vcpu->arch.srr1 = regs->srr1; | 570 | vcpu->arch.shared->srr1 = regs->srr1; |
523 | vcpu->arch.sprg0 = regs->sprg0; | 571 | vcpu->arch.shared->sprg0 = regs->sprg0; |
524 | vcpu->arch.sprg1 = regs->sprg1; | 572 | vcpu->arch.shared->sprg1 = regs->sprg1; |
525 | vcpu->arch.sprg2 = regs->sprg2; | 573 | vcpu->arch.shared->sprg2 = regs->sprg2; |
526 | vcpu->arch.sprg3 = regs->sprg3; | 574 | vcpu->arch.shared->sprg3 = regs->sprg3; |
527 | vcpu->arch.sprg5 = regs->sprg4; | 575 | vcpu->arch.sprg5 = regs->sprg4; |
528 | vcpu->arch.sprg6 = regs->sprg5; | 576 | vcpu->arch.sprg6 = regs->sprg5; |
529 | vcpu->arch.sprg7 = regs->sprg6; | 577 | vcpu->arch.sprg7 = regs->sprg6; |
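Editor's note: the new BOOKE_IRQPRIO_EXTERNAL_LEVEL path above keeps a level-triggered external interrupt pending (keep_irq) across deliveries until userspace explicitly deasserts the line. A minimal userspace sketch of that protocol, assuming a kernel that advertises KVM_CAP_PPC_IRQ_LEVEL:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Assert or deassert the emulated device's interrupt line. */
	static int set_ext_irq_level(int vcpu_fd, int asserted)
	{
		struct kvm_interrupt irq = {
			.irq = asserted ? KVM_INTERRUPT_SET_LEVEL
					: KVM_INTERRUPT_UNSET,
		};

		return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
	}

KVM_INTERRUPT_UNSET clears both the edge and the level pseudo-priority, matching the dequeue hunk above that clears both bits.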
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index d59bcca1f9d8..492bb7030358 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h | |||
@@ -46,7 +46,9 @@ | |||
46 | #define BOOKE_IRQPRIO_FIT 17 | 46 | #define BOOKE_IRQPRIO_FIT 17 |
47 | #define BOOKE_IRQPRIO_DECREMENTER 18 | 47 | #define BOOKE_IRQPRIO_DECREMENTER 18 |
48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 | 48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 |
49 | #define BOOKE_IRQPRIO_MAX 19 | 49 | /* Internal pseudo-irqprio for level triggered externals */ |
50 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 | ||
51 | #define BOOKE_IRQPRIO_MAX 20 | ||
50 | 52 | ||
51 | extern unsigned long kvmppc_booke_handlers; | 53 | extern unsigned long kvmppc_booke_handlers; |
52 | 54 | ||
@@ -54,12 +56,12 @@ extern unsigned long kvmppc_booke_handlers; | |||
54 | * changing. */ | 56 | * changing. */ |
55 | static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | 57 | static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) |
56 | { | 58 | { |
57 | if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) | 59 | if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR)) |
58 | kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); | 60 | kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); |
59 | 61 | ||
60 | vcpu->arch.msr = new_msr; | 62 | vcpu->arch.shared->msr = new_msr; |
61 | 63 | ||
62 | if (vcpu->arch.msr & MSR_WE) { | 64 | if (vcpu->arch.shared->msr & MSR_WE) { |
63 | kvm_vcpu_block(vcpu); | 65 | kvm_vcpu_block(vcpu); |
64 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); | 66 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); |
65 | }; | 67 | }; |
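Editor's note: the shared->critical field consulted by the delivery code deserves a word. The guest writes its r1 there to flag a critical section, and asynchronous interrupts are held back while the value still matches r1 in supervisor mode. A condensed form of that test — a hypothetical helper, logic copied from kvmppc_booke_irqprio_deliver above:

	static bool kvmppc_in_guest_critical(struct kvm_vcpu *vcpu)
	{
		ulong crit_raw = vcpu->arch.shared->critical;
		ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);

		/* Truncate the indicators for a 32-bit guest */
		if (!(vcpu->arch.shared->msr & MSR_SF)) {
			crit_raw &= 0xffffffff;
			crit_r1 &= 0xffffffff;
		}

		/* In a critical section when crit == r1, in supervisor mode */
		return crit_raw == crit_r1 &&
		       !(vcpu->arch.shared->msr & MSR_PR);
	}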
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index cbc790ee1928..1260f5f24c0c 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c | |||
@@ -31,8 +31,8 @@ | |||
31 | 31 | ||
32 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | 32 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) |
33 | { | 33 | { |
34 | vcpu->arch.pc = vcpu->arch.srr0; | 34 | vcpu->arch.pc = vcpu->arch.shared->srr0; |
35 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | 35 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); |
36 | } | 36 | } |
37 | 37 | ||
38 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 38 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, |
@@ -62,7 +62,7 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
62 | 62 | ||
63 | case OP_31_XOP_MFMSR: | 63 | case OP_31_XOP_MFMSR: |
64 | rt = get_rt(inst); | 64 | rt = get_rt(inst); |
65 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr); | 65 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); |
66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | 66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); |
67 | break; | 67 | break; |
68 | 68 | ||
@@ -74,13 +74,13 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
74 | 74 | ||
75 | case OP_31_XOP_WRTEE: | 75 | case OP_31_XOP_WRTEE: |
76 | rs = get_rs(inst); | 76 | rs = get_rs(inst); |
77 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | 77 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) |
78 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); | 78 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); |
79 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 79 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
80 | break; | 80 | break; |
81 | 81 | ||
82 | case OP_31_XOP_WRTEEI: | 82 | case OP_31_XOP_WRTEEI: |
83 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | 83 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) |
84 | | (inst & MSR_EE); | 84 | | (inst & MSR_EE); |
85 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 85 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
86 | break; | 86 | break; |
@@ -105,7 +105,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
105 | 105 | ||
106 | switch (sprn) { | 106 | switch (sprn) { |
107 | case SPRN_DEAR: | 107 | case SPRN_DEAR: |
108 | vcpu->arch.dear = spr_val; break; | 108 | vcpu->arch.shared->dar = spr_val; break; |
109 | case SPRN_ESR: | 109 | case SPRN_ESR: |
110 | vcpu->arch.esr = spr_val; break; | 110 | vcpu->arch.esr = spr_val; break; |
111 | case SPRN_DBCR0: | 111 | case SPRN_DBCR0: |
@@ -200,7 +200,7 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
200 | case SPRN_IVPR: | 200 | case SPRN_IVPR: |
201 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; | 201 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; |
202 | case SPRN_DEAR: | 202 | case SPRN_DEAR: |
203 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break; | 203 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; |
204 | case SPRN_ESR: | 204 | case SPRN_ESR: |
205 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; | 205 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; |
206 | case SPRN_DBCR0: | 206 | case SPRN_DBCR0: |
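Editor's note: all of the srr0/srr1/sprg0-3/dar accesses in these emulation paths now land in a single guest-visible page rather than scattered vcpu->arch fields, so a paravirtualized guest can read them without trapping. For orientation, approximately the layout being targeted — trimmed and only a sketch; the authoritative definition lives in arch/powerpc/include/asm/kvm_para.h:

	struct kvm_vcpu_arch_shared {
		__u64 critical;		/* guest r1 while in a critical section */
		__u64 sprg0;
		__u64 sprg1;
		__u64 sprg2;
		__u64 sprg3;
		__u64 srr0;
		__u64 srr1;
		__u64 dar;		/* doubles as DEAR on BookE */
		__u64 msr;
		__u32 dsisr;
		__u32 int_pending;	/* set while interrupts are pending */
		/* further fields (scratch regs, segment registers) omitted */
	};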
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 380a78cf484d..049846911ce4 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -415,7 +415,8 @@ lightweight_exit: | |||
415 | lwz r8, VCPU_GPR(r8)(r4) | 415 | lwz r8, VCPU_GPR(r8)(r4) |
416 | lwz r3, VCPU_PC(r4) | 416 | lwz r3, VCPU_PC(r4) |
417 | mtsrr0 r3 | 417 | mtsrr0 r3 |
418 | lwz r3, VCPU_MSR(r4) | 418 | lwz r3, VCPU_SHARED(r4) |
419 | lwz r3, VCPU_SHARED_MSR(r3) | ||
419 | oris r3, r3, KVMPPC_MSR_MASK@h | 420 | oris r3, r3, KVMPPC_MSR_MASK@h |
420 | ori r3, r3, KVMPPC_MSR_MASK@l | 421 | ori r3, r3, KVMPPC_MSR_MASK@l |
421 | mtsrr1 r3 | 422 | mtsrr1 r3 |
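Editor's note: the former single load of VCPU_MSR becomes a two-step dereference — first the pointer to the shared page, then the msr field inside it. Both offsets come from asm-offsets; a sketch of the definitions this series adds to arch/powerpc/kernel/asm-offsets.c (exact placement there may differ):

	/* In arch/powerpc/kernel/asm-offsets.c (sketch): */
	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));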
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index e8a00b0c4449..71750f2dd5d3 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -117,8 +117,14 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
117 | if (err) | 117 | if (err) |
118 | goto uninit_vcpu; | 118 | goto uninit_vcpu; |
119 | 119 | ||
120 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); | ||
121 | if (!vcpu->arch.shared) | ||
122 | goto uninit_tlb; | ||
123 | |||
120 | return vcpu; | 124 | return vcpu; |
121 | 125 | ||
126 | uninit_tlb: | ||
127 | kvmppc_e500_tlb_uninit(vcpu_e500); | ||
122 | uninit_vcpu: | 128 | uninit_vcpu: |
123 | kvm_vcpu_uninit(vcpu); | 129 | kvm_vcpu_uninit(vcpu); |
124 | free_vcpu: | 130 | free_vcpu: |
@@ -131,6 +137,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
131 | { | 137 | { |
132 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 138 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
133 | 139 | ||
140 | free_page((unsigned long)vcpu->arch.shared); | ||
134 | kvmppc_e500_tlb_uninit(vcpu_e500); | 141 | kvmppc_e500_tlb_uninit(vcpu_e500); |
135 | kvm_vcpu_uninit(vcpu); | 142 | kvm_vcpu_uninit(vcpu); |
136 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | 143 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); |
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 21011e12caeb..d6d6d47a75a9 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c | |||
@@ -226,8 +226,7 @@ static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
226 | 226 | ||
227 | kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); | 227 | kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); |
228 | stlbe->mas1 = 0; | 228 | stlbe->mas1 = 0; |
229 | trace_kvm_stlb_inval(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, | 229 | trace_kvm_stlb_inval(index_of(tlbsel, esel)); |
230 | stlbe->mas3, stlbe->mas7); | ||
231 | } | 230 | } |
232 | 231 | ||
233 | static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, | 232 | static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, |
@@ -298,7 +297,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
298 | /* Get reference to new page. */ | 297 | /* Get reference to new page. */ |
299 | new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn); | 298 | new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn); |
300 | if (is_error_page(new_page)) { | 299 | if (is_error_page(new_page)) { |
301 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); | 300 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", |
301 | (long)gfn); | ||
302 | kvm_release_page_clean(new_page); | 302 | kvm_release_page_clean(new_page); |
303 | return; | 303 | return; |
304 | } | 304 | } |
@@ -314,10 +314,10 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
314 | | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; | 314 | | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; |
315 | stlbe->mas2 = (gvaddr & MAS2_EPN) | 315 | stlbe->mas2 = (gvaddr & MAS2_EPN) |
316 | | e500_shadow_mas2_attrib(gtlbe->mas2, | 316 | | e500_shadow_mas2_attrib(gtlbe->mas2, |
317 | vcpu_e500->vcpu.arch.msr & MSR_PR); | 317 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); |
318 | stlbe->mas3 = (hpaddr & MAS3_RPN) | 318 | stlbe->mas3 = (hpaddr & MAS3_RPN) |
319 | | e500_shadow_mas3_attrib(gtlbe->mas3, | 319 | | e500_shadow_mas3_attrib(gtlbe->mas3, |
320 | vcpu_e500->vcpu.arch.msr & MSR_PR); | 320 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); |
321 | stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; | 321 | stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; |
322 | 322 | ||
323 | trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, | 323 | trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, |
@@ -576,28 +576,28 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
576 | 576 | ||
577 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | 577 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
578 | { | 578 | { |
579 | unsigned int as = !!(vcpu->arch.msr & MSR_IS); | 579 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
580 | 580 | ||
581 | return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); | 581 | return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); |
582 | } | 582 | } |
583 | 583 | ||
584 | int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | 584 | int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
585 | { | 585 | { |
586 | unsigned int as = !!(vcpu->arch.msr & MSR_DS); | 586 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); |
587 | 587 | ||
588 | return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); | 588 | return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); |
589 | } | 589 | } |
590 | 590 | ||
591 | void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) | 591 | void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) |
592 | { | 592 | { |
593 | unsigned int as = !!(vcpu->arch.msr & MSR_IS); | 593 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
594 | 594 | ||
595 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); | 595 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); |
596 | } | 596 | } |
597 | 597 | ||
598 | void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) | 598 | void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) |
599 | { | 599 | { |
600 | unsigned int as = !!(vcpu->arch.msr & MSR_DS); | 600 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); |
601 | 601 | ||
602 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); | 602 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); |
603 | } | 603 | } |
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h index d28e3010a5e2..458946b4775d 100644 --- a/arch/powerpc/kvm/e500_tlb.h +++ b/arch/powerpc/kvm/e500_tlb.h | |||
@@ -171,7 +171,7 @@ static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | |||
171 | 171 | ||
172 | /* Does it match current guest AS? */ | 172 | /* Does it match current guest AS? */ |
173 | /* XXX what about IS != DS? */ | 173 | /* XXX what about IS != DS? */ |
174 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) | 174 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) |
175 | return 0; | 175 | return 0; |
176 | 176 | ||
177 | gpa = get_tlb_raddr(tlbe); | 177 | gpa = get_tlb_raddr(tlbe); |
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 4568ec386c2a..c64fd2909bb2 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -145,7 +145,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
145 | /* this default type might be overwritten by subcategories */ | 145 | /* this default type might be overwritten by subcategories */ |
146 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 146 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
147 | 147 | ||
148 | pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); | 148 | pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); |
149 | 149 | ||
150 | switch (get_op(inst)) { | 150 | switch (get_op(inst)) { |
151 | case OP_TRAP: | 151 | case OP_TRAP: |
@@ -242,9 +242,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
242 | 242 | ||
243 | switch (sprn) { | 243 | switch (sprn) { |
244 | case SPRN_SRR0: | 244 | case SPRN_SRR0: |
245 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break; | 245 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); |
246 | break; | ||
246 | case SPRN_SRR1: | 247 | case SPRN_SRR1: |
247 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break; | 248 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1); |
249 | break; | ||
248 | case SPRN_PVR: | 250 | case SPRN_PVR: |
249 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; | 251 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; |
250 | case SPRN_PIR: | 252 | case SPRN_PIR: |
@@ -261,13 +263,17 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
261 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; | 263 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; |
262 | 264 | ||
263 | case SPRN_SPRG0: | 265 | case SPRN_SPRG0: |
264 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break; | 266 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0); |
267 | break; | ||
265 | case SPRN_SPRG1: | 268 | case SPRN_SPRG1: |
266 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break; | 269 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1); |
270 | break; | ||
267 | case SPRN_SPRG2: | 271 | case SPRN_SPRG2: |
268 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break; | 272 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2); |
273 | break; | ||
269 | case SPRN_SPRG3: | 274 | case SPRN_SPRG3: |
270 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break; | 275 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3); |
276 | break; | ||
271 | /* Note: SPRG4-7 are user-readable, so we don't get | 277 | /* Note: SPRG4-7 are user-readable, so we don't get |
272 | * a trap. */ | 278 | * a trap. */ |
273 | 279 | ||
@@ -275,7 +281,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
275 | { | 281 | { |
276 | u64 jd = get_tb() - vcpu->arch.dec_jiffies; | 282 | u64 jd = get_tb() - vcpu->arch.dec_jiffies; |
277 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd); | 283 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd); |
278 | pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", | 284 | pr_debug("mfDEC: %x - %llx = %lx\n", |
279 | vcpu->arch.dec, jd, | 285 | vcpu->arch.dec, jd, |
280 | kvmppc_get_gpr(vcpu, rt)); | 286 | kvmppc_get_gpr(vcpu, rt)); |
281 | break; | 287 | break; |
@@ -320,9 +326,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
320 | rs = get_rs(inst); | 326 | rs = get_rs(inst); |
321 | switch (sprn) { | 327 | switch (sprn) { |
322 | case SPRN_SRR0: | 328 | case SPRN_SRR0: |
323 | vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break; | 329 | vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); |
330 | break; | ||
324 | case SPRN_SRR1: | 331 | case SPRN_SRR1: |
325 | vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break; | 332 | vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs); |
333 | break; | ||
326 | 334 | ||
327 | /* XXX We need to context-switch the timebase for | 335 | /* XXX We need to context-switch the timebase for |
328 | * watchdog and FIT. */ | 336 | * watchdog and FIT. */ |
@@ -337,13 +345,17 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
337 | break; | 345 | break; |
338 | 346 | ||
339 | case SPRN_SPRG0: | 347 | case SPRN_SPRG0: |
340 | vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break; | 348 | vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs); |
349 | break; | ||
341 | case SPRN_SPRG1: | 350 | case SPRN_SPRG1: |
342 | vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break; | 351 | vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs); |
352 | break; | ||
343 | case SPRN_SPRG2: | 353 | case SPRN_SPRG2: |
344 | vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break; | 354 | vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs); |
355 | break; | ||
345 | case SPRN_SPRG3: | 356 | case SPRN_SPRG3: |
346 | vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break; | 357 | vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs); |
358 | break; | ||
347 | 359 | ||
348 | default: | 360 | default: |
349 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); | 361 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); |
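Editor's note: the mfDEC case is worth unpacking. vcpu->arch.dec_jiffies records the timebase value at the moment DEC was last written, so the guest-visible value is the loaded count minus the timebase ticks elapsed since. A standalone sketch of that arithmetic — a hypothetical function mirroring the two lines in the hunk:

	static u32 guest_dec_now(u32 dec_on_load, u64 tb_on_load, u64 tb_now)
	{
		u64 jd = tb_now - tb_on_load;	/* "jd" as in the code above */

		/* DEC counts down with the timebase and wraps like the
		 * real register does. */
		return dec_on_load - (u32)jd;
	}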
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S index cb34bbe16113..bf68d597549e 100644 --- a/arch/powerpc/kvm/fpu.S +++ b/arch/powerpc/kvm/fpu.S | |||
@@ -273,19 +273,11 @@ FPD_THREE_IN(fnmsub) | |||
273 | FPD_THREE_IN(fnmadd) | 273 | FPD_THREE_IN(fnmadd) |
274 | 274 | ||
275 | _GLOBAL(kvm_cvt_fd) | 275 | _GLOBAL(kvm_cvt_fd) |
276 | lfd 0,0(r5) /* load up fpscr value */ | ||
277 | MTFSF_L(0) | ||
278 | lfs 0,0(r3) | 276 | lfs 0,0(r3) |
279 | stfd 0,0(r4) | 277 | stfd 0,0(r4) |
280 | mffs 0 | ||
281 | stfd 0,0(r5) /* save new fpscr value */ | ||
282 | blr | 278 | blr |
283 | 279 | ||
284 | _GLOBAL(kvm_cvt_df) | 280 | _GLOBAL(kvm_cvt_df) |
285 | lfd 0,0(r5) /* load up fpscr value */ | ||
286 | MTFSF_L(0) | ||
287 | lfd 0,0(r3) | 281 | lfd 0,0(r3) |
288 | stfs 0,0(r4) | 282 | stfs 0,0(r4) |
289 | mffs 0 | ||
290 | stfd 0,0(r5) /* save new fpscr value */ | ||
291 | blr | 283 | blr |
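Editor's note: with the FPSCR load/save dropped, kvm_cvt_fd and kvm_cvt_df are pure single/double conversions and no longer touch r5. Assuming the declarations in arch/powerpc/include/asm/kvm_fpu.h are updated to match (not shown in this diff), the prototypes reduce to:

	extern void kvm_cvt_fd(u32 *from, u64 *to);	/* float  -> double */
	extern void kvm_cvt_df(u64 *from, u32 *to);	/* double -> float  */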
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 72a4ad86ee91..2f87a1627f6c 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -38,9 +38,56 @@ | |||
38 | 38 | ||
39 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | 39 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
40 | { | 40 | { |
41 | return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions); | 41 | return !(v->arch.shared->msr & MSR_WE) || |
42 | !!(v->arch.pending_exceptions); | ||
42 | } | 43 | } |
43 | 44 | ||
45 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | ||
46 | { | ||
47 | int nr = kvmppc_get_gpr(vcpu, 11); | ||
48 | int r; | ||
49 | unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); | ||
50 | unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); | ||
51 | unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); | ||
52 | unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); | ||
53 | unsigned long r2 = 0; | ||
54 | |||
55 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | ||
56 | /* 32 bit mode */ | ||
57 | param1 &= 0xffffffff; | ||
58 | param2 &= 0xffffffff; | ||
59 | param3 &= 0xffffffff; | ||
60 | param4 &= 0xffffffff; | ||
61 | } | ||
62 | |||
63 | switch (nr) { | ||
64 | case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE: | ||
65 | { | ||
66 | vcpu->arch.magic_page_pa = param1; | ||
67 | vcpu->arch.magic_page_ea = param2; | ||
68 | |||
69 | r2 = KVM_MAGIC_FEAT_SR; | ||
70 | |||
71 | r = HC_EV_SUCCESS; | ||
72 | break; | ||
73 | } | ||
74 | case HC_VENDOR_KVM | KVM_HC_FEATURES: | ||
75 | r = HC_EV_SUCCESS; | ||
76 | #if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */ | ||
77 | r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); | ||
78 | #endif | ||
79 | |||
80 | /* Second return value is in r4 */ | ||
81 | break; | ||
82 | default: | ||
83 | r = HC_EV_UNIMPLEMENTED; | ||
84 | break; | ||
85 | } | ||
86 | |||
87 | kvmppc_set_gpr(vcpu, 4, r2); | ||
88 | |||
89 | return r; | ||
90 | } | ||
44 | 91 | ||
45 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | 92 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) |
46 | { | 93 | { |
@@ -145,8 +192,10 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
145 | case KVM_CAP_PPC_SEGSTATE: | 192 | case KVM_CAP_PPC_SEGSTATE: |
146 | case KVM_CAP_PPC_PAIRED_SINGLES: | 193 | case KVM_CAP_PPC_PAIRED_SINGLES: |
147 | case KVM_CAP_PPC_UNSET_IRQ: | 194 | case KVM_CAP_PPC_UNSET_IRQ: |
195 | case KVM_CAP_PPC_IRQ_LEVEL: | ||
148 | case KVM_CAP_ENABLE_CAP: | 196 | case KVM_CAP_ENABLE_CAP: |
149 | case KVM_CAP_PPC_OSI: | 197 | case KVM_CAP_PPC_OSI: |
198 | case KVM_CAP_PPC_GET_PVINFO: | ||
150 | r = 1; | 199 | r = 1; |
151 | break; | 200 | break; |
152 | case KVM_CAP_COALESCED_MMIO: | 201 | case KVM_CAP_COALESCED_MMIO: |
@@ -534,16 +583,53 @@ out: | |||
534 | return r; | 583 | return r; |
535 | } | 584 | } |
536 | 585 | ||
586 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) | ||
587 | { | ||
588 | u32 inst_lis = 0x3c000000; | ||
589 | u32 inst_ori = 0x60000000; | ||
590 | u32 inst_nop = 0x60000000; | ||
591 | u32 inst_sc = 0x44000002; | ||
592 | u32 inst_imm_mask = 0xffff; | ||
593 | |||
594 | /* | ||
595 | * The hypercall to get into KVM from within guest context is as | ||
596 | * follows: | ||
597 | * | ||
598 | * lis r0, KVM_SC_MAGIC_R0@h | ||
599 | * ori r0, r0, KVM_SC_MAGIC_R0@l | ||
600 | * sc | ||
601 | * nop | ||
602 | */ | ||
603 | pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask); | ||
604 | pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); | ||
605 | pvinfo->hcall[2] = inst_sc; | ||
606 | pvinfo->hcall[3] = inst_nop; | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
537 | long kvm_arch_vm_ioctl(struct file *filp, | 611 | long kvm_arch_vm_ioctl(struct file *filp, |
538 | unsigned int ioctl, unsigned long arg) | 612 | unsigned int ioctl, unsigned long arg) |
539 | { | 613 | { |
614 | void __user *argp = (void __user *)arg; | ||
540 | long r; | 615 | long r; |
541 | 616 | ||
542 | switch (ioctl) { | 617 | switch (ioctl) { |
618 | case KVM_PPC_GET_PVINFO: { | ||
619 | struct kvm_ppc_pvinfo pvinfo; | ||
620 | r = kvm_vm_ioctl_get_pvinfo(&pvinfo); | ||
621 | if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { | ||
622 | r = -EFAULT; | ||
623 | goto out; | ||
624 | } | ||
625 | |||
626 | break; | ||
627 | } | ||
543 | default: | 628 | default: |
544 | r = -ENOTTY; | 629 | r = -ENOTTY; |
545 | } | 630 | } |
546 | 631 | ||
632 | out: | ||
547 | return r; | 633 | return r; |
548 | } | 634 | } |
549 | 635 | ||
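Editor's note: KVM_PPC_GET_PVINFO hands userspace the four instructions a guest must execute to hypercall into KVM, so a VMM can advertise them (for instance via an epapr-style hcall-instructions device-tree property) without hardcoding the magic sequence. A userspace sketch, assuming a kernel that advertises KVM_CAP_PPC_GET_PVINFO:

	#include <linux/kvm.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	static int print_hcall_insns(int vm_fd)
	{
		struct kvm_ppc_pvinfo pvinfo;
		int i;

		if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
			return -1;

		/* lis/ori/sc/nop, ready to copy into the guest. */
		for (i = 0; i < 4; i++)
			printf("hcall[%d] = 0x%08x\n", i, pvinfo.hcall[i]);

		return 0;
	}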
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index a8e840018052..3aca1b042b8c 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h | |||
@@ -98,6 +98,245 @@ TRACE_EVENT(kvm_gtlb_write, | |||
98 | __entry->word1, __entry->word2) | 98 | __entry->word1, __entry->word2) |
99 | ); | 99 | ); |
100 | 100 | ||
101 | |||
102 | /************************************************************************* | ||
103 | * Book3S trace points * | ||
104 | *************************************************************************/ | ||
105 | |||
106 | #ifdef CONFIG_PPC_BOOK3S | ||
107 | |||
108 | TRACE_EVENT(kvm_book3s_exit, | ||
109 | TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), | ||
110 | TP_ARGS(exit_nr, vcpu), | ||
111 | |||
112 | TP_STRUCT__entry( | ||
113 | __field( unsigned int, exit_nr ) | ||
114 | __field( unsigned long, pc ) | ||
115 | __field( unsigned long, msr ) | ||
116 | __field( unsigned long, dar ) | ||
117 | __field( unsigned long, srr1 ) | ||
118 | ), | ||
119 | |||
120 | TP_fast_assign( | ||
121 | __entry->exit_nr = exit_nr; | ||
122 | __entry->pc = kvmppc_get_pc(vcpu); | ||
123 | __entry->dar = kvmppc_get_fault_dar(vcpu); | ||
124 | __entry->msr = vcpu->arch.shared->msr; | ||
125 | __entry->srr1 = to_svcpu(vcpu)->shadow_srr1; | ||
126 | ), | ||
127 | |||
128 | TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx", | ||
129 | __entry->exit_nr, __entry->pc, __entry->msr, __entry->dar, | ||
130 | __entry->srr1) | ||
131 | ); | ||
132 | |||
133 | TRACE_EVENT(kvm_book3s_reenter, | ||
134 | TP_PROTO(int r, struct kvm_vcpu *vcpu), | ||
135 | TP_ARGS(r, vcpu), | ||
136 | |||
137 | TP_STRUCT__entry( | ||
138 | __field( unsigned int, r ) | ||
139 | __field( unsigned long, pc ) | ||
140 | ), | ||
141 | |||
142 | TP_fast_assign( | ||
143 | __entry->r = r; | ||
144 | __entry->pc = kvmppc_get_pc(vcpu); | ||
145 | ), | ||
146 | |||
147 | TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc) | ||
148 | ); | ||
149 | |||
150 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
151 | |||
152 | TRACE_EVENT(kvm_book3s_64_mmu_map, | ||
153 | TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr, | ||
154 | struct kvmppc_pte *orig_pte), | ||
155 | TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte), | ||
156 | |||
157 | TP_STRUCT__entry( | ||
158 | __field( unsigned char, flag_w ) | ||
159 | __field( unsigned char, flag_x ) | ||
160 | __field( unsigned long, eaddr ) | ||
161 | __field( unsigned long, hpteg ) | ||
162 | __field( unsigned long, va ) | ||
163 | __field( unsigned long long, vpage ) | ||
164 | __field( unsigned long, hpaddr ) | ||
165 | ), | ||
166 | |||
167 | TP_fast_assign( | ||
168 | __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w'; | ||
169 | __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x'; | ||
170 | __entry->eaddr = orig_pte->eaddr; | ||
171 | __entry->hpteg = hpteg; | ||
172 | __entry->va = va; | ||
173 | __entry->vpage = orig_pte->vpage; | ||
174 | __entry->hpaddr = hpaddr; | ||
175 | ), | ||
176 | |||
177 | TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx", | ||
178 | __entry->flag_w, __entry->flag_x, __entry->eaddr, | ||
179 | __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr) | ||
180 | ); | ||
181 | |||
182 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
183 | |||
184 | TRACE_EVENT(kvm_book3s_mmu_map, | ||
185 | TP_PROTO(struct hpte_cache *pte), | ||
186 | TP_ARGS(pte), | ||
187 | |||
188 | TP_STRUCT__entry( | ||
189 | __field( u64, host_va ) | ||
190 | __field( u64, pfn ) | ||
191 | __field( ulong, eaddr ) | ||
192 | __field( u64, vpage ) | ||
193 | __field( ulong, raddr ) | ||
194 | __field( int, flags ) | ||
195 | ), | ||
196 | |||
197 | TP_fast_assign( | ||
198 | __entry->host_va = pte->host_va; | ||
199 | __entry->pfn = pte->pfn; | ||
200 | __entry->eaddr = pte->pte.eaddr; | ||
201 | __entry->vpage = pte->pte.vpage; | ||
202 | __entry->raddr = pte->pte.raddr; | ||
203 | __entry->flags = (pte->pte.may_read ? 0x4 : 0) | | ||
204 | (pte->pte.may_write ? 0x2 : 0) | | ||
205 | (pte->pte.may_execute ? 0x1 : 0); | ||
206 | ), | ||
207 | |||
208 | TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", | ||
209 | __entry->host_va, __entry->pfn, __entry->eaddr, | ||
210 | __entry->vpage, __entry->raddr, __entry->flags) | ||
211 | ); | ||
212 | |||
213 | TRACE_EVENT(kvm_book3s_mmu_invalidate, | ||
214 | TP_PROTO(struct hpte_cache *pte), | ||
215 | TP_ARGS(pte), | ||
216 | |||
217 | TP_STRUCT__entry( | ||
218 | __field( u64, host_va ) | ||
219 | __field( u64, pfn ) | ||
220 | __field( ulong, eaddr ) | ||
221 | __field( u64, vpage ) | ||
222 | __field( ulong, raddr ) | ||
223 | __field( int, flags ) | ||
224 | ), | ||
225 | |||
226 | TP_fast_assign( | ||
227 | __entry->host_va = pte->host_va; | ||
228 | __entry->pfn = pte->pfn; | ||
229 | __entry->eaddr = pte->pte.eaddr; | ||
230 | __entry->vpage = pte->pte.vpage; | ||
231 | __entry->raddr = pte->pte.raddr; | ||
232 | __entry->flags = (pte->pte.may_read ? 0x4 : 0) | | ||
233 | (pte->pte.may_write ? 0x2 : 0) | | ||
234 | (pte->pte.may_execute ? 0x1 : 0); | ||
235 | ), | ||
236 | |||
237 | TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", | ||
238 | __entry->host_va, __entry->pfn, __entry->eaddr, | ||
239 | __entry->vpage, __entry->raddr, __entry->flags) | ||
240 | ); | ||
241 | |||
242 | TRACE_EVENT(kvm_book3s_mmu_flush, | ||
243 | TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1, | ||
244 | unsigned long long p2), | ||
245 | TP_ARGS(type, vcpu, p1, p2), | ||
246 | |||
247 | TP_STRUCT__entry( | ||
248 | __field( int, count ) | ||
249 | __field( unsigned long long, p1 ) | ||
250 | __field( unsigned long long, p2 ) | ||
251 | __field( const char *, type ) | ||
252 | ), | ||
253 | |||
254 | TP_fast_assign( | ||
255 | __entry->count = vcpu->arch.hpte_cache_count; | ||
256 | __entry->p1 = p1; | ||
257 | __entry->p2 = p2; | ||
258 | __entry->type = type; | ||
259 | ), | ||
260 | |||
261 | TP_printk("Flush %d %sPTEs: %llx - %llx", | ||
262 | __entry->count, __entry->type, __entry->p1, __entry->p2) | ||
263 | ); | ||
264 | |||
265 | TRACE_EVENT(kvm_book3s_slb_found, | ||
266 | TP_PROTO(unsigned long long gvsid, unsigned long long hvsid), | ||
267 | TP_ARGS(gvsid, hvsid), | ||
268 | |||
269 | TP_STRUCT__entry( | ||
270 | __field( unsigned long long, gvsid ) | ||
271 | __field( unsigned long long, hvsid ) | ||
272 | ), | ||
273 | |||
274 | TP_fast_assign( | ||
275 | __entry->gvsid = gvsid; | ||
276 | __entry->hvsid = hvsid; | ||
277 | ), | ||
278 | |||
279 | TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid) | ||
280 | ); | ||
281 | |||
282 | TRACE_EVENT(kvm_book3s_slb_fail, | ||
283 | TP_PROTO(u16 sid_map_mask, unsigned long long gvsid), | ||
284 | TP_ARGS(sid_map_mask, gvsid), | ||
285 | |||
286 | TP_STRUCT__entry( | ||
287 | __field( unsigned short, sid_map_mask ) | ||
288 | __field( unsigned long long, gvsid ) | ||
289 | ), | ||
290 | |||
291 | TP_fast_assign( | ||
292 | __entry->sid_map_mask = sid_map_mask; | ||
293 | __entry->gvsid = gvsid; | ||
294 | ), | ||
295 | |||
296 | TP_printk("%x/%x: %llx", __entry->sid_map_mask, | ||
297 | SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid) | ||
298 | ); | ||
299 | |||
300 | TRACE_EVENT(kvm_book3s_slb_map, | ||
301 | TP_PROTO(u16 sid_map_mask, unsigned long long gvsid, | ||
302 | unsigned long long hvsid), | ||
303 | TP_ARGS(sid_map_mask, gvsid, hvsid), | ||
304 | |||
305 | TP_STRUCT__entry( | ||
306 | __field( unsigned short, sid_map_mask ) | ||
307 | __field( unsigned long long, guest_vsid ) | ||
308 | __field( unsigned long long, host_vsid ) | ||
309 | ), | ||
310 | |||
311 | TP_fast_assign( | ||
312 | __entry->sid_map_mask = sid_map_mask; | ||
313 | __entry->guest_vsid = gvsid; | ||
314 | __entry->host_vsid = hvsid; | ||
315 | ), | ||
316 | |||
317 | TP_printk("%x: %llx -> %llx", __entry->sid_map_mask, | ||
318 | __entry->guest_vsid, __entry->host_vsid) | ||
319 | ); | ||
320 | |||
321 | TRACE_EVENT(kvm_book3s_slbmte, | ||
322 | TP_PROTO(u64 slb_vsid, u64 slb_esid), | ||
323 | TP_ARGS(slb_vsid, slb_esid), | ||
324 | |||
325 | TP_STRUCT__entry( | ||
326 | __field( u64, slb_vsid ) | ||
327 | __field( u64, slb_esid ) | ||
328 | ), | ||
329 | |||
330 | TP_fast_assign( | ||
331 | __entry->slb_vsid = slb_vsid; | ||
332 | __entry->slb_esid = slb_esid; | ||
333 | ), | ||
334 | |||
335 | TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid) | ||
336 | ); | ||
337 | |||
338 | #endif /* CONFIG_PPC_BOOK3S */ | ||
339 | |||
101 | #endif /* _TRACE_KVM_H */ | 340 | #endif /* _TRACE_KVM_H */ |
102 | 341 | ||
103 | /* This part must be outside protection */ | 342 | /* This part must be outside protection */ |