path: root/arch/powerpc/kvm
author    Scott Wood <scottwood@freescale.com>    2013-04-12 10:08:46 -0400
committer Alexander Graf <agraf@suse.de>          2013-04-26 14:27:23 -0400
commit 5df554ad5b7522ea62b0ff9d5be35183494efc21 (patch)
tree   b00f569c1d7a684d3455ad59f52c7b5aa7ac04e7 /arch/powerpc/kvm
parent f0f5c481a91c56f1ee5b3809bf3943115143b1a7 (diff)
kvm/ppc/mpic: in-kernel MPIC emulation
Hook the MPIC code up to the KVM interfaces, add locking, etc.

Signed-off-by: Scott Wood <scottwood@freescale.com>
[agraf: add stub function for kvmppc_mpic_set_epr, non-booke, 64bit]
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/Kconfig      9
-rw-r--r--  arch/powerpc/kvm/Makefile     2
-rw-r--r--  arch/powerpc/kvm/booke.c      8
-rw-r--r--  arch/powerpc/kvm/mpic.c     762
-rw-r--r--  arch/powerpc/kvm/powerpc.c   12
5 files changed, 594 insertions, 199 deletions
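For orientation, here is a hedged userspace sketch (not part of the patch) of how a VMM could create the in-kernel MPIC once this series lands: check for KVM_CAP_DEVICE_CTRL (advertised in the powerpc.c hunk below), then call KVM_CREATE_DEVICE with the Freescale MPIC 2.0 device type. The helper name create_mpic() and the minimal error handling are illustrative assumptions; the ioctls and structures come from the KVM device-control API this patch plugs into.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: kvm_fd is the /dev/kvm fd, vm_fd the VM fd. */
static int create_mpic(int kvm_fd, int vm_fd)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_FSL_MPIC_20,	/* or _42 for MPIC 4.2 */
	};

	/* The capability bit is set by the powerpc.c hunk in this patch. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
		fprintf(stderr, "no in-kernel device support\n");
		return -1;
	}

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0) {
		perror("KVM_CREATE_DEVICE");
		return -1;
	}

	return cd.fd;	/* device fd, used with KVM_SET_DEVICE_ATTR later */
}

The returned device fd is what the attribute sketch after the mpic.c hunk operates on.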
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 448952035b6e..f47e95e0b6de 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -151,6 +151,15 @@ config KVM_E500MC
151 151
152 If unsure, say N. 152 If unsure, say N.
153 153
154config KVM_MPIC
155 bool "KVM in-kernel MPIC emulation"
156 depends on KVM
157 help
158 Enable support for emulating MPIC devices inside the
159 host kernel, rather than relying on userspace to emulate.
160 Currently, support is limited to certain versions of
161 Freescale's MPIC implementation.
162
154source drivers/vhost/Kconfig 163source drivers/vhost/Kconfig
155 164
156endif # VIRTUALIZATION 165endif # VIRTUALIZATION
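A minimal configuration fragment exercising the new option might look as follows (a sketch assuming an e500mc target, whose KVM option pulls in KVM itself):

CONFIG_VIRTUALIZATION=y
CONFIG_KVM_E500MC=y
CONFIG_KVM_MPIC=y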
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index b772eded8c26..4a2277a221bb 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -103,6 +103,8 @@ kvm-book3s_32-objs := \
103 book3s_32_mmu.o 103 book3s_32_mmu.o
104kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs) 104kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
105 105
106kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o
107
106kvm-objs := $(kvm-objs-m) $(kvm-objs-y) 108kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
107 109
108obj-$(CONFIG_KVM_440) += kvm.o 110obj-$(CONFIG_KVM_440) += kvm.o
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 02756537cd90..4da11ed48c59 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -346,7 +346,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
346 keep_irq = true; 346 keep_irq = true;
347 } 347 }
348 348
349 if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_enabled) 349 if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
350 update_epr = true; 350 update_epr = true;
351 351
352 switch (priority) { 352 switch (priority) {
@@ -427,8 +427,10 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
427 set_guest_esr(vcpu, vcpu->arch.queued_esr); 427 set_guest_esr(vcpu, vcpu->arch.queued_esr);
428 if (update_dear == true) 428 if (update_dear == true)
429 set_guest_dear(vcpu, vcpu->arch.queued_dear); 429 set_guest_dear(vcpu, vcpu->arch.queued_dear);
430 if (update_epr == true) 430 if (update_epr == true) {
431 kvm_make_request(KVM_REQ_EPR_EXIT, vcpu); 431 if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
432 kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
433 }
432 434
433 new_msr &= msr_mask; 435 new_msr &= msr_mask;
434#if defined(CONFIG_64BIT) 436#if defined(CONFIG_64BIT)
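The hunk above only requests an exit to userspace for EPR delivery when KVMPPC_EPR_USER is set; presumably the in-kernel MPIC path fills EPR inside the kernel via kvmppc_mpic_set_epr, which the commit message mentions. For reference, a hedged sketch of a vcpu opting into userspace EPR delivery through the existing KVM_CAP_PPC_EPR capability (also reworked in powerpc.c below); enable_user_epr() is an illustrative name and the headers are the same as in the earlier sketch:

/* Pass args[0] = 0 instead to clear KVMPPC_EPR_USER again. */
static int enable_user_epr(int vcpu_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_PPC_EPR,
		.args = { 1 },
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}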
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 1df67aed7a91..cb451b91e342 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -23,6 +23,19 @@
23 * THE SOFTWARE. 23 * THE SOFTWARE.
24 */ 24 */
25 25
26#include <linux/slab.h>
27#include <linux/mutex.h>
28#include <linux/kvm_host.h>
29#include <linux/errno.h>
30#include <linux/fs.h>
31#include <linux/anon_inodes.h>
32#include <asm/uaccess.h>
33#include <asm/mpic.h>
34#include <asm/kvm_para.h>
35#include <asm/kvm_host.h>
36#include <asm/kvm_ppc.h>
37#include "iodev.h"
38
26#define MAX_CPU 32 39#define MAX_CPU 32
27#define MAX_SRC 256 40#define MAX_SRC 256
28#define MAX_TMR 4 41#define MAX_TMR 4
@@ -36,6 +49,7 @@
36#define OPENPIC_FLAG_ILR (2 << 0) 49#define OPENPIC_FLAG_ILR (2 << 0)
37 50
38/* OpenPIC address map */ 51/* OpenPIC address map */
52#define OPENPIC_REG_SIZE 0x40000
39#define OPENPIC_GLB_REG_START 0x0 53#define OPENPIC_GLB_REG_START 0x0
40#define OPENPIC_GLB_REG_SIZE 0x10F0 54#define OPENPIC_GLB_REG_SIZE 0x10F0
41#define OPENPIC_TMR_REG_START 0x10F0 55#define OPENPIC_TMR_REG_START 0x10F0
@@ -89,6 +103,7 @@ static struct fsl_mpic_info fsl_mpic_42 = {
89#define ILR_INTTGT_INT 0x00 103#define ILR_INTTGT_INT 0x00
90#define ILR_INTTGT_CINT 0x01 /* critical */ 104#define ILR_INTTGT_CINT 0x01 /* critical */
91#define ILR_INTTGT_MCP 0x02 /* machine check */ 105#define ILR_INTTGT_MCP 0x02 /* machine check */
106#define NUM_OUTPUTS 3
92 107
93#define MSIIR_OFFSET 0x140 108#define MSIIR_OFFSET 0x140
94#define MSIIR_SRS_SHIFT 29 109#define MSIIR_SRS_SHIFT 29
@@ -98,18 +113,19 @@ static struct fsl_mpic_info fsl_mpic_42 = {
98 113
99static int get_current_cpu(void) 114static int get_current_cpu(void)
100{ 115{
101 CPUState *cpu_single_cpu; 116#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
102 117 struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
103 if (!cpu_single_env) 118 return vcpu ? vcpu->vcpu_id : -1;
104 return -1; 119#else
105 120 /* XXX */
106 cpu_single_cpu = ENV_GET_CPU(cpu_single_env); 121 return -1;
107 return cpu_single_cpu->cpu_index; 122#endif
108} 123}
109 124
110static uint32_t openpic_cpu_read_internal(void *opaque, gpa_t addr, int idx); 125static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
111static void openpic_cpu_write_internal(void *opaque, gpa_t addr, 126 u32 val, int idx);
112 uint32_t val, int idx); 127static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
128 u32 *ptr, int idx);
113 129
114enum irq_type { 130enum irq_type {
115 IRQ_TYPE_NORMAL = 0, 131 IRQ_TYPE_NORMAL = 0,
@@ -131,7 +147,7 @@ struct irq_source {
131 uint32_t idr; /* IRQ destination register */ 147 uint32_t idr; /* IRQ destination register */
132 uint32_t destmask; /* bitmap of CPU destinations */ 148 uint32_t destmask; /* bitmap of CPU destinations */
133 int last_cpu; 149 int last_cpu;
134 int output; /* IRQ level, e.g. OPENPIC_OUTPUT_INT */ 150 int output; /* IRQ level, e.g. ILR_INTTGT_INT */
135 int pending; /* TRUE if IRQ is pending */ 151 int pending; /* TRUE if IRQ is pending */
136 enum irq_type type; 152 enum irq_type type;
137 bool level:1; /* level-triggered */ 153 bool level:1; /* level-triggered */
@@ -158,16 +174,27 @@ struct irq_source {
158#define IDR_CI 0x40000000 /* critical interrupt */ 174#define IDR_CI 0x40000000 /* critical interrupt */
159 175
160struct irq_dest { 176struct irq_dest {
177 struct kvm_vcpu *vcpu;
178
161 int32_t ctpr; /* CPU current task priority */ 179 int32_t ctpr; /* CPU current task priority */
162 struct irq_queue raised; 180 struct irq_queue raised;
163 struct irq_queue servicing; 181 struct irq_queue servicing;
164 qemu_irq *irqs;
165 182
166 /* Count of IRQ sources asserting on non-INT outputs */ 183 /* Count of IRQ sources asserting on non-INT outputs */
167 uint32_t outputs_active[OPENPIC_OUTPUT_NB]; 184 uint32_t outputs_active[NUM_OUTPUTS];
168}; 185};
169 186
170struct openpic { 187struct openpic {
188 struct kvm *kvm;
189 struct kvm_device *dev;
190 struct kvm_io_device mmio;
191 struct list_head mmio_regions;
192 atomic_t users;
193 bool mmio_mapped;
194
195 gpa_t reg_base;
196 spinlock_t lock;
197
171 /* Behavior control */ 198 /* Behavior control */
172 struct fsl_mpic_info *fsl; 199 struct fsl_mpic_info *fsl;
173 uint32_t model; 200 uint32_t model;
@@ -208,6 +235,47 @@ struct openpic {
208 uint32_t irq_msi; 235 uint32_t irq_msi;
209}; 236};
210 237
238
239static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst,
240 int output)
241{
242 struct kvm_interrupt irq = {
243 .irq = KVM_INTERRUPT_SET_LEVEL,
244 };
245
246 if (!dst->vcpu) {
247 pr_debug("%s: destination cpu %d does not exist\n",
248 __func__, (int)(dst - &opp->dst[0]));
249 return;
250 }
251
252 pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->vcpu_id,
253 output);
254
255 if (output != ILR_INTTGT_INT) /* TODO */
256 return;
257
258 kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
259}
260
261static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst,
262 int output)
263{
264 if (!dst->vcpu) {
265 pr_debug("%s: destination cpu %d does not exist\n",
266 __func__, (int)(dst - &opp->dst[0]));
267 return;
268 }
269
270 pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->vcpu_id,
271 output);
272
273 if (output != ILR_INTTGT_INT) /* TODO */
274 return;
275
276 kvmppc_core_dequeue_external(dst->vcpu);
277}
278
211static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ) 279static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
212{ 280{
213 set_bit(n_IRQ, q->queue); 281 set_bit(n_IRQ, q->queue);
@@ -268,7 +336,7 @@ static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
268 pr_debug("%s: IRQ %d active %d was %d\n", 336 pr_debug("%s: IRQ %d active %d was %d\n",
269 __func__, n_IRQ, active, was_active); 337 __func__, n_IRQ, active, was_active);
270 338
271 if (src->output != OPENPIC_OUTPUT_INT) { 339 if (src->output != ILR_INTTGT_INT) {
272 pr_debug("%s: output %d irq %d active %d was %d count %d\n", 340 pr_debug("%s: output %d irq %d active %d was %d count %d\n",
273 __func__, src->output, n_IRQ, active, was_active, 341 __func__, src->output, n_IRQ, active, was_active,
274 dst->outputs_active[src->output]); 342 dst->outputs_active[src->output]);
@@ -282,14 +350,14 @@ static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
282 dst->outputs_active[src->output]++ == 0) { 350 dst->outputs_active[src->output]++ == 0) {
283 pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n", 351 pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
284 __func__, src->output, n_CPU, n_IRQ); 352 __func__, src->output, n_CPU, n_IRQ);
285 qemu_irq_raise(dst->irqs[src->output]); 353 mpic_irq_raise(opp, dst, src->output);
286 } 354 }
287 } else { 355 } else {
288 if (was_active && 356 if (was_active &&
289 --dst->outputs_active[src->output] == 0) { 357 --dst->outputs_active[src->output] == 0) {
290 pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n", 358 pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
291 __func__, src->output, n_CPU, n_IRQ); 359 __func__, src->output, n_CPU, n_IRQ);
292 qemu_irq_lower(dst->irqs[src->output]); 360 mpic_irq_lower(opp, dst, src->output);
293 } 361 }
294 } 362 }
295 363
@@ -322,8 +390,7 @@ static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
322 } else { 390 } else {
323 pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n", 391 pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
324 __func__, n_CPU, n_IRQ, dst->raised.next); 392 __func__, n_CPU, n_IRQ, dst->raised.next);
325 qemu_irq_raise(opp->dst[n_CPU]. 393 mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
326 irqs[OPENPIC_OUTPUT_INT]);
327 } 394 }
328 } else { 395 } else {
329 IRQ_get_next(opp, &dst->servicing); 396 IRQ_get_next(opp, &dst->servicing);
@@ -338,8 +405,7 @@ static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
338 pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n", 405 pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
339 __func__, n_IRQ, dst->ctpr, 406 __func__, n_IRQ, dst->ctpr,
340 dst->servicing.priority, n_CPU); 407 dst->servicing.priority, n_CPU);
341 qemu_irq_lower(opp->dst[n_CPU]. 408 mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
342 irqs[OPENPIC_OUTPUT_INT]);
343 } 409 }
344 } 410 }
345} 411}
@@ -415,8 +481,8 @@ static void openpic_set_irq(void *opaque, int n_IRQ, int level)
415 struct irq_source *src; 481 struct irq_source *src;
416 482
417 if (n_IRQ >= MAX_IRQ) { 483 if (n_IRQ >= MAX_IRQ) {
418 pr_err("%s: IRQ %d out of range\n", __func__, n_IRQ); 484 WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ);
419 abort(); 485 return;
420 } 486 }
421 487
422 src = &opp->src[n_IRQ]; 488 src = &opp->src[n_IRQ];
@@ -433,7 +499,7 @@ static void openpic_set_irq(void *opaque, int n_IRQ, int level)
433 openpic_update_irq(opp, n_IRQ); 499 openpic_update_irq(opp, n_IRQ);
434 } 500 }
435 501
436 if (src->output != OPENPIC_OUTPUT_INT) { 502 if (src->output != ILR_INTTGT_INT) {
437 /* Edge-triggered interrupts shouldn't be used 503 /* Edge-triggered interrupts shouldn't be used
438 * with non-INT delivery, but just in case, 504 * with non-INT delivery, but just in case,
439 * try to make it do something sane rather than 505 * try to make it do something sane rather than
@@ -446,15 +512,13 @@ static void openpic_set_irq(void *opaque, int n_IRQ, int level)
446 } 512 }
447} 513}
448 514
449static void openpic_reset(DeviceState *d) 515static void openpic_reset(struct openpic *opp)
450{ 516{
451 struct openpic *opp = FROM_SYSBUS(typeof(*opp), SYS_BUS_DEVICE(d));
452 int i; 517 int i;
453 518
454 opp->gcr = GCR_RESET; 519 opp->gcr = GCR_RESET;
455 /* Initialise controller registers */ 520 /* Initialise controller registers */
456 opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) | 521 opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
457 ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) |
458 (opp->vid << FRR_VID_SHIFT); 522 (opp->vid << FRR_VID_SHIFT);
459 523
460 opp->pir = 0; 524 opp->pir = 0;
@@ -504,7 +568,7 @@ static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ)
504static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ) 568static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ)
505{ 569{
506 if (opp->flags & OPENPIC_FLAG_ILR) 570 if (opp->flags & OPENPIC_FLAG_ILR)
507 return output_to_inttgt(opp->src[n_IRQ].output); 571 return opp->src[n_IRQ].output;
508 572
509 return 0xffffffff; 573 return 0xffffffff;
510} 574}
@@ -539,7 +603,7 @@ static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
539 __func__); 603 __func__);
540 } 604 }
541 605
542 src->output = OPENPIC_OUTPUT_CINT; 606 src->output = ILR_INTTGT_CINT;
543 src->nomask = true; 607 src->nomask = true;
544 src->destmask = 0; 608 src->destmask = 0;
545 609
@@ -550,7 +614,7 @@ static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
550 src->destmask |= 1UL << i; 614 src->destmask |= 1UL << i;
551 } 615 }
552 } else { 616 } else {
553 src->output = OPENPIC_OUTPUT_INT; 617 src->output = ILR_INTTGT_INT;
554 src->nomask = false; 618 src->nomask = false;
555 src->destmask = src->idr & normal_mask; 619 src->destmask = src->idr & normal_mask;
556 } 620 }
@@ -565,7 +629,7 @@ static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ,
565 if (opp->flags & OPENPIC_FLAG_ILR) { 629 if (opp->flags & OPENPIC_FLAG_ILR) {
566 struct irq_source *src = &opp->src[n_IRQ]; 630 struct irq_source *src = &opp->src[n_IRQ];
567 631
568 src->output = inttgt_to_output(val & ILR_INTTGT_MASK); 632 src->output = val & ILR_INTTGT_MASK;
569 pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr, 633 pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
570 src->output); 634 src->output);
571 635
@@ -614,34 +678,23 @@ static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ,
614 678
615static void openpic_gcr_write(struct openpic *opp, uint64_t val) 679static void openpic_gcr_write(struct openpic *opp, uint64_t val)
616{ 680{
617 bool mpic_proxy = false;
618
619 if (val & GCR_RESET) { 681 if (val & GCR_RESET) {
620 openpic_reset(&opp->busdev.qdev); 682 openpic_reset(opp);
621 return; 683 return;
622 } 684 }
623 685
624 opp->gcr &= ~opp->mpic_mode_mask; 686 opp->gcr &= ~opp->mpic_mode_mask;
625 opp->gcr |= val & opp->mpic_mode_mask; 687 opp->gcr |= val & opp->mpic_mode_mask;
626
627 /* Set external proxy mode */
628 if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY)
629 mpic_proxy = true;
630
631 ppce500_set_mpic_proxy(mpic_proxy);
632} 688}
633 689
634static void openpic_gbl_write(void *opaque, gpa_t addr, uint64_t val, 690static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val)
635 unsigned len)
636{ 691{
637 struct openpic *opp = opaque; 692 struct openpic *opp = opaque;
638 struct irq_dest *dst; 693 int err = 0;
639 int idx;
640 694
641 pr_debug("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n", 695 pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
642 __func__, addr, val);
643 if (addr & 0xF) 696 if (addr & 0xF)
644 return; 697 return 0;
645 698
646 switch (addr) { 699 switch (addr) {
647 case 0x00: /* Block Revision Register1 (BRR1) is Readonly */ 700 case 0x00: /* Block Revision Register1 (BRR1) is Readonly */
@@ -654,7 +707,8 @@ static void openpic_gbl_write(void *opaque, gpa_t addr, uint64_t val,
654 case 0x90: 707 case 0x90:
655 case 0xA0: 708 case 0xA0:
656 case 0xB0: 709 case 0xB0:
657 openpic_cpu_write_internal(opp, addr, val, get_current_cpu()); 710 err = openpic_cpu_write_internal(opp, addr, val,
711 get_current_cpu());
658 break; 712 break;
659 case 0x1000: /* FRR */ 713 case 0x1000: /* FRR */
660 break; 714 break;
@@ -664,21 +718,11 @@ static void openpic_gbl_write(void *opaque, gpa_t addr, uint64_t val,
664 case 0x1080: /* VIR */ 718 case 0x1080: /* VIR */
665 break; 719 break;
666 case 0x1090: /* PIR */ 720 case 0x1090: /* PIR */
667 for (idx = 0; idx < opp->nb_cpus; idx++) { 721 /*
668 if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) { 722 * This register is used to reset a CPU core --
669 pr_debug("Raise OpenPIC RESET output for CPU %d\n", 723 * let userspace handle it.
670 idx); 724 */
671 dst = &opp->dst[idx]; 725 err = -ENXIO;
672 qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
673 } else if (!(val & (1 << idx)) &&
674 (opp->pir & (1 << idx))) {
675 pr_debug("Lower OpenPIC RESET output for CPU %d\n",
676 idx);
677 dst = &opp->dst[idx];
678 qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
679 }
680 }
681 opp->pir = val;
682 break; 726 break;
683 case 0x10A0: /* IPI_IVPR */ 727 case 0x10A0: /* IPI_IVPR */
684 case 0x10B0: 728 case 0x10B0:
@@ -695,21 +739,25 @@ static void openpic_gbl_write(void *opaque, gpa_t addr, uint64_t val,
695 default: 739 default:
696 break; 740 break;
697 } 741 }
742
743 return err;
698} 744}
699 745
700static uint64_t openpic_gbl_read(void *opaque, gpa_t addr, unsigned len) 746static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr)
701{ 747{
702 struct openpic *opp = opaque; 748 struct openpic *opp = opaque;
703 uint32_t retval; 749 u32 retval;
750 int err = 0;
704 751
705 pr_debug("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); 752 pr_debug("%s: addr %#llx\n", __func__, addr);
706 retval = 0xFFFFFFFF; 753 retval = 0xFFFFFFFF;
707 if (addr & 0xF) 754 if (addr & 0xF)
708 return retval; 755 goto out;
709 756
710 switch (addr) { 757 switch (addr) {
711 case 0x1000: /* FRR */ 758 case 0x1000: /* FRR */
712 retval = opp->frr; 759 retval = opp->frr;
760 retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT;
713 break; 761 break;
714 case 0x1020: /* GCR */ 762 case 0x1020: /* GCR */
715 retval = opp->gcr; 763 retval = opp->gcr;
@@ -731,8 +779,8 @@ static uint64_t openpic_gbl_read(void *opaque, gpa_t addr, unsigned len)
731 case 0x90: 779 case 0x90:
732 case 0xA0: 780 case 0xA0:
733 case 0xB0: 781 case 0xB0:
734 retval = 782 err = openpic_cpu_read_internal(opp, addr,
735 openpic_cpu_read_internal(opp, addr, get_current_cpu()); 783 &retval, get_current_cpu());
736 break; 784 break;
737 case 0x10A0: /* IPI_IVPR */ 785 case 0x10A0: /* IPI_IVPR */
738 case 0x10B0: 786 case 0x10B0:
@@ -750,28 +798,28 @@ static uint64_t openpic_gbl_read(void *opaque, gpa_t addr, unsigned len)
750 default: 798 default:
751 break; 799 break;
752 } 800 }
753 pr_debug("%s: => 0x%08x\n", __func__, retval);
754 801
755 return retval; 802out:
803 pr_debug("%s: => 0x%08x\n", __func__, retval);
804 *ptr = retval;
805 return err;
756} 806}
757 807
758static void openpic_tmr_write(void *opaque, gpa_t addr, uint64_t val, 808static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val)
759 unsigned len)
760{ 809{
761 struct openpic *opp = opaque; 810 struct openpic *opp = opaque;
762 int idx; 811 int idx;
763 812
764 addr += 0x10f0; 813 addr += 0x10f0;
765 814
766 pr_debug("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n", 815 pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
767 __func__, addr, val);
768 if (addr & 0xF) 816 if (addr & 0xF)
769 return; 817 return 0;
770 818
771 if (addr == 0x10f0) { 819 if (addr == 0x10f0) {
772 /* TFRR */ 820 /* TFRR */
773 opp->tfrr = val; 821 opp->tfrr = val;
774 return; 822 return 0;
775 } 823 }
776 824
777 idx = (addr >> 6) & 0x3; 825 idx = (addr >> 6) & 0x3;
@@ -795,15 +843,17 @@ static void openpic_tmr_write(void *opaque, gpa_t addr, uint64_t val,
795 write_IRQreg_idr(opp, opp->irq_tim0 + idx, val); 843 write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
796 break; 844 break;
797 } 845 }
846
847 return 0;
798} 848}
799 849
800static uint64_t openpic_tmr_read(void *opaque, gpa_t addr, unsigned len) 850static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr)
801{ 851{
802 struct openpic *opp = opaque; 852 struct openpic *opp = opaque;
803 uint32_t retval = -1; 853 uint32_t retval = -1;
804 int idx; 854 int idx;
805 855
806 pr_debug("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); 856 pr_debug("%s: addr %#llx\n", __func__, addr);
807 if (addr & 0xF) 857 if (addr & 0xF)
808 goto out; 858 goto out;
809 859
@@ -813,6 +863,7 @@ static uint64_t openpic_tmr_read(void *opaque, gpa_t addr, unsigned len)
813 retval = opp->tfrr; 863 retval = opp->tfrr;
814 goto out; 864 goto out;
815 } 865 }
866
816 switch (addr & 0x30) { 867 switch (addr & 0x30) {
817 case 0x00: /* TCCR */ 868 case 0x00: /* TCCR */
818 retval = opp->timers[idx].tccr; 869 retval = opp->timers[idx].tccr;
@@ -830,18 +881,16 @@ static uint64_t openpic_tmr_read(void *opaque, gpa_t addr, unsigned len)
830 881
831out: 882out:
832 pr_debug("%s: => 0x%08x\n", __func__, retval); 883 pr_debug("%s: => 0x%08x\n", __func__, retval);
833 884 *ptr = retval;
834 return retval; 885 return 0;
835} 886}
836 887
837static void openpic_src_write(void *opaque, gpa_t addr, uint64_t val, 888static int openpic_src_write(void *opaque, gpa_t addr, u32 val)
838 unsigned len)
839{ 889{
840 struct openpic *opp = opaque; 890 struct openpic *opp = opaque;
841 int idx; 891 int idx;
842 892
843 pr_debug("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n", 893 pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
844 __func__, addr, val);
845 894
846 addr = addr & 0xffff; 895 addr = addr & 0xffff;
847 idx = addr >> 5; 896 idx = addr >> 5;
@@ -857,15 +906,17 @@ static void openpic_src_write(void *opaque, gpa_t addr, uint64_t val,
857 write_IRQreg_ilr(opp, idx, val); 906 write_IRQreg_ilr(opp, idx, val);
858 break; 907 break;
859 } 908 }
909
910 return 0;
860} 911}
861 912
862static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len) 913static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr)
863{ 914{
864 struct openpic *opp = opaque; 915 struct openpic *opp = opaque;
865 uint32_t retval; 916 uint32_t retval;
866 int idx; 917 int idx;
867 918
868 pr_debug("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); 919 pr_debug("%s: addr %#llx\n", __func__, addr);
869 retval = 0xFFFFFFFF; 920 retval = 0xFFFFFFFF;
870 921
871 addr = addr & 0xffff; 922 addr = addr & 0xffff;
@@ -884,20 +935,19 @@ static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
884 } 935 }
885 936
886 pr_debug("%s: => 0x%08x\n", __func__, retval); 937 pr_debug("%s: => 0x%08x\n", __func__, retval);
887 return retval; 938 *ptr = retval;
939 return 0;
888} 940}
889 941
890static void openpic_msi_write(void *opaque, gpa_t addr, uint64_t val, 942static int openpic_msi_write(void *opaque, gpa_t addr, u32 val)
891 unsigned size)
892{ 943{
893 struct openpic *opp = opaque; 944 struct openpic *opp = opaque;
894 int idx = opp->irq_msi; 945 int idx = opp->irq_msi;
895 int srs, ibs; 946 int srs, ibs;
896 947
897 pr_debug("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64 "\n", 948 pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
898 __func__, addr, val);
899 if (addr & 0xF) 949 if (addr & 0xF)
900 return; 950 return 0;
901 951
902 switch (addr) { 952 switch (addr) {
903 case MSIIR_OFFSET: 953 case MSIIR_OFFSET:
@@ -911,17 +961,19 @@ static void openpic_msi_write(void *opaque, gpa_t addr, uint64_t val,
911 /* most registers are read-only, thus ignored */ 961 /* most registers are read-only, thus ignored */
912 break; 962 break;
913 } 963 }
964
965 return 0;
914} 966}
915 967
916static uint64_t openpic_msi_read(void *opaque, gpa_t addr, unsigned size) 968static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr)
917{ 969{
918 struct openpic *opp = opaque; 970 struct openpic *opp = opaque;
919 uint64_t r = 0; 971 uint32_t r = 0;
920 int i, srs; 972 int i, srs;
921 973
922 pr_debug("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); 974 pr_debug("%s: addr %#llx\n", __func__, addr);
923 if (addr & 0xF) 975 if (addr & 0xF)
924 return -1; 976 return -ENXIO;
925 977
926 srs = addr >> 4; 978 srs = addr >> 4;
927 979
@@ -945,45 +997,47 @@ static uint64_t openpic_msi_read(void *opaque, gpa_t addr, unsigned size)
945 break; 997 break;
946 } 998 }
947 999
948 return r; 1000 pr_debug("%s: => 0x%08x\n", __func__, r);
1001 *ptr = r;
1002 return 0;
949} 1003}
950 1004
951static uint64_t openpic_summary_read(void *opaque, gpa_t addr, unsigned size) 1005static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr)
952{ 1006{
953 uint64_t r = 0; 1007 uint32_t r = 0;
954 1008
955 pr_debug("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); 1009 pr_debug("%s: addr %#llx\n", __func__, addr);
956 1010
957 /* TODO: EISR/EIMR */ 1011 /* TODO: EISR/EIMR */
958 1012
959 return r; 1013 *ptr = r;
1014 return 0;
960} 1015}
961 1016
962static void openpic_summary_write(void *opaque, gpa_t addr, uint64_t val, 1017static int openpic_summary_write(void *opaque, gpa_t addr, u32 val)
963 unsigned size)
964{ 1018{
965 pr_debug("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64 "\n", 1019 pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
966 __func__, addr, val);
967 1020
968 /* TODO: EISR/EIMR */ 1021 /* TODO: EISR/EIMR */
1022 return 0;
969} 1023}
970 1024
971static void openpic_cpu_write_internal(void *opaque, gpa_t addr, 1025static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
972 uint32_t val, int idx) 1026 u32 val, int idx)
973{ 1027{
974 struct openpic *opp = opaque; 1028 struct openpic *opp = opaque;
975 struct irq_source *src; 1029 struct irq_source *src;
976 struct irq_dest *dst; 1030 struct irq_dest *dst;
977 int s_IRQ, n_IRQ; 1031 int s_IRQ, n_IRQ;
978 1032
979 pr_debug("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x\n", __func__, idx, 1033 pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
980 addr, val); 1034 addr, val);
981 1035
982 if (idx < 0) 1036 if (idx < 0)
983 return; 1037 return 0;
984 1038
985 if (addr & 0xF) 1039 if (addr & 0xF)
986 return; 1040 return 0;
987 1041
988 dst = &opp->dst[idx]; 1042 dst = &opp->dst[idx];
989 addr &= 0xFF0; 1043 addr &= 0xFF0;
@@ -1008,11 +1062,11 @@ static void openpic_cpu_write_internal(void *opaque, gpa_t addr,
1008 if (dst->raised.priority <= dst->ctpr) { 1062 if (dst->raised.priority <= dst->ctpr) {
1009 pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n", 1063 pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
1010 __func__, idx); 1064 __func__, idx);
1011 qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]); 1065 mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
1012 } else if (dst->raised.priority > dst->servicing.priority) { 1066 } else if (dst->raised.priority > dst->servicing.priority) {
1013 pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n", 1067 pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
1014 __func__, idx, dst->raised.next); 1068 __func__, idx, dst->raised.next);
1015 qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]); 1069 mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
1016 } 1070 }
1017 1071
1018 break; 1072 break;
@@ -1043,18 +1097,22 @@ static void openpic_cpu_write_internal(void *opaque, gpa_t addr,
1043 IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) { 1097 IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
1044 pr_debug("Raise OpenPIC INT output cpu %d irq %d\n", 1098 pr_debug("Raise OpenPIC INT output cpu %d irq %d\n",
1045 idx, n_IRQ); 1099 idx, n_IRQ);
1046 qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]); 1100 mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
1047 } 1101 }
1048 break; 1102 break;
1049 default: 1103 default:
1050 break; 1104 break;
1051 } 1105 }
1106
1107 return 0;
1052} 1108}
1053 1109
1054static void openpic_cpu_write(void *opaque, gpa_t addr, uint64_t val, 1110static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val)
1055 unsigned len)
1056{ 1111{
1057 openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12); 1112 struct openpic *opp = opaque;
1113
1114 return openpic_cpu_write_internal(opp, addr, val,
1115 (addr & 0x1f000) >> 12);
1058} 1116}
1059 1117
1060static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst, 1118static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
@@ -1064,7 +1122,7 @@ static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
1064 int retval, irq; 1122 int retval, irq;
1065 1123
1066 pr_debug("Lower OpenPIC INT output\n"); 1124 pr_debug("Lower OpenPIC INT output\n");
1067 qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]); 1125 mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
1068 1126
1069 irq = IRQ_get_next(opp, &dst->raised); 1127 irq = IRQ_get_next(opp, &dst->raised);
1070 pr_debug("IACK: irq=%d\n", irq); 1128 pr_debug("IACK: irq=%d\n", irq);
@@ -1107,20 +1165,21 @@ static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
1107 return retval; 1165 return retval;
1108} 1166}
1109 1167
1110static uint32_t openpic_cpu_read_internal(void *opaque, gpa_t addr, int idx) 1168static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
1169 u32 *ptr, int idx)
1111{ 1170{
1112 struct openpic *opp = opaque; 1171 struct openpic *opp = opaque;
1113 struct irq_dest *dst; 1172 struct irq_dest *dst;
1114 uint32_t retval; 1173 uint32_t retval;
1115 1174
1116 pr_debug("%s: cpu %d addr %#" HWADDR_PRIx "\n", __func__, idx, addr); 1175 pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr);
1117 retval = 0xFFFFFFFF; 1176 retval = 0xFFFFFFFF;
1118 1177
1119 if (idx < 0) 1178 if (idx < 0)
1120 return retval; 1179 goto out;
1121 1180
1122 if (addr & 0xF) 1181 if (addr & 0xF)
1123 return retval; 1182 goto out;
1124 1183
1125 dst = &opp->dst[idx]; 1184 dst = &opp->dst[idx];
1126 addr &= 0xFF0; 1185 addr &= 0xFF0;
@@ -1142,49 +1201,67 @@ static uint32_t openpic_cpu_read_internal(void *opaque, gpa_t addr, int idx)
1142 } 1201 }
1143 pr_debug("%s: => 0x%08x\n", __func__, retval); 1202 pr_debug("%s: => 0x%08x\n", __func__, retval);
1144 1203
1145 return retval; 1204out:
1205 *ptr = retval;
1206 return 0;
1146} 1207}
1147 1208
1148static uint64_t openpic_cpu_read(void *opaque, gpa_t addr, unsigned len) 1209static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr)
1149{ 1210{
1150 return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12); 1211 struct openpic *opp = opaque;
1212
1213 return openpic_cpu_read_internal(opp, addr, ptr,
1214 (addr & 0x1f000) >> 12);
1151} 1215}
1152 1216
1153static const struct kvm_io_device_ops openpic_glb_ops_be = { 1217struct mem_reg {
1218 struct list_head list;
1219 int (*read)(void *opaque, gpa_t addr, u32 *ptr);
1220 int (*write)(void *opaque, gpa_t addr, u32 val);
1221 gpa_t start_addr;
1222 int size;
1223};
1224
1225static struct mem_reg openpic_gbl_mmio = {
1154 .write = openpic_gbl_write, 1226 .write = openpic_gbl_write,
1155 .read = openpic_gbl_read, 1227 .read = openpic_gbl_read,
1228 .start_addr = OPENPIC_GLB_REG_START,
1229 .size = OPENPIC_GLB_REG_SIZE,
1156}; 1230};
1157 1231
1158static const struct kvm_io_device_ops openpic_tmr_ops_be = { 1232static struct mem_reg openpic_tmr_mmio = {
1159 .write = openpic_tmr_write, 1233 .write = openpic_tmr_write,
1160 .read = openpic_tmr_read, 1234 .read = openpic_tmr_read,
1235 .start_addr = OPENPIC_TMR_REG_START,
1236 .size = OPENPIC_TMR_REG_SIZE,
1161}; 1237};
1162 1238
1163static const struct kvm_io_device_ops openpic_cpu_ops_be = { 1239static struct mem_reg openpic_cpu_mmio = {
1164 .write = openpic_cpu_write, 1240 .write = openpic_cpu_write,
1165 .read = openpic_cpu_read, 1241 .read = openpic_cpu_read,
1242 .start_addr = OPENPIC_CPU_REG_START,
1243 .size = OPENPIC_CPU_REG_SIZE,
1166}; 1244};
1167 1245
1168static const struct kvm_io_device_ops openpic_src_ops_be = { 1246static struct mem_reg openpic_src_mmio = {
1169 .write = openpic_src_write, 1247 .write = openpic_src_write,
1170 .read = openpic_src_read, 1248 .read = openpic_src_read,
1249 .start_addr = OPENPIC_SRC_REG_START,
1250 .size = OPENPIC_SRC_REG_SIZE,
1171}; 1251};
1172 1252
1173static const struct kvm_io_device_ops openpic_msi_ops_be = { 1253static struct mem_reg openpic_msi_mmio = {
1174 .read = openpic_msi_read, 1254 .read = openpic_msi_read,
1175 .write = openpic_msi_write, 1255 .write = openpic_msi_write,
1256 .start_addr = OPENPIC_MSI_REG_START,
1257 .size = OPENPIC_MSI_REG_SIZE,
1176}; 1258};
1177 1259
1178static const struct kvm_io_device_ops openpic_summary_ops_be = { 1260static struct mem_reg openpic_summary_mmio = {
1179 .read = openpic_summary_read, 1261 .read = openpic_summary_read,
1180 .write = openpic_summary_write, 1262 .write = openpic_summary_write,
1181}; 1263 .start_addr = OPENPIC_SUMMARY_REG_START,
1182 1264 .size = OPENPIC_SUMMARY_REG_SIZE,
1183struct mem_reg {
1184 const char *name;
1185 const struct kvm_io_device_ops *ops;
1186 gpa_t start_addr;
1187 int size;
1188}; 1265};
1189 1266
1190static void fsl_common_init(struct openpic *opp) 1267static void fsl_common_init(struct openpic *opp)
@@ -1192,6 +1269,9 @@ static void fsl_common_init(struct openpic *opp)
1192 int i; 1269 int i;
1193 int virq = MAX_SRC; 1270 int virq = MAX_SRC;
1194 1271
1272 list_add(&openpic_msi_mmio.list, &opp->mmio_regions);
1273 list_add(&openpic_summary_mmio.list, &opp->mmio_regions);
1274
1195 opp->vid = VID_REVISION_1_2; 1275 opp->vid = VID_REVISION_1_2;
1196 opp->vir = VIR_GENERIC; 1276 opp->vir = VIR_GENERIC;
1197 opp->vector_mask = 0xFFFF; 1277 opp->vector_mask = 0xFFFF;
@@ -1205,11 +1285,10 @@ static void fsl_common_init(struct openpic *opp)
1205 opp->irq_tim0 = virq; 1285 opp->irq_tim0 = virq;
1206 virq += MAX_TMR; 1286 virq += MAX_TMR;
1207 1287
1208 assert(virq <= MAX_IRQ); 1288 BUG_ON(virq > MAX_IRQ);
1209 1289
1210 opp->irq_msi = 224; 1290 opp->irq_msi = 224;
1211 1291
1212 msi_supported = true;
1213 for (i = 0; i < opp->fsl->max_ext; i++) 1292 for (i = 0; i < opp->fsl->max_ext; i++)
1214 opp->src[i].level = false; 1293 opp->src[i].level = false;
1215 1294
@@ -1226,63 +1305,352 @@ static void fsl_common_init(struct openpic *opp)
1226 } 1305 }
1227} 1306}
1228 1307
1229static void map_list(struct openpic *opp, const struct mem_reg *list, 1308static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr)
1230 int *count)
1231{ 1309{
1232 while (list->name) { 1310 struct list_head *node;
1233 assert(*count < ARRAY_SIZE(opp->sub_io_mem));
1234 1311
1235 memory_region_init_io(&opp->sub_io_mem[*count], list->ops, opp, 1312 list_for_each(node, &opp->mmio_regions) {
1236 list->name, list->size); 1313 struct mem_reg *mr = list_entry(node, struct mem_reg, list);
1237 1314
1238 memory_region_add_subregion(&opp->mem, list->start_addr, 1315 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
1239 &opp->sub_io_mem[*count]); 1316 continue;
1240 1317
1241 (*count)++; 1318 return mr->read(opp, addr - mr->start_addr, ptr);
1242 list++;
1243 } 1319 }
1320
1321 return -ENXIO;
1244} 1322}
1245 1323
1246static int openpic_init(SysBusDevice *dev) 1324static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
1247{ 1325{
1248 struct openpic *opp = FROM_SYSBUS(typeof(*opp), dev); 1326 struct list_head *node;
1249 int i, j; 1327
1250 int list_count = 0; 1328 list_for_each(node, &opp->mmio_regions) {
1251 static const struct mem_reg list_le[] = { 1329 struct mem_reg *mr = list_entry(node, struct mem_reg, list);
1252 {"glb", &openpic_glb_ops_le, 1330
1253 OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE}, 1331 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
1254 {"tmr", &openpic_tmr_ops_le, 1332 continue;
1255 OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
1256 {"src", &openpic_src_ops_le,
1257 OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
1258 {"cpu", &openpic_cpu_ops_le,
1259 OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
1260 {NULL}
1261 };
1262 static const struct mem_reg list_be[] = {
1263 {"glb", &openpic_glb_ops_be,
1264 OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
1265 {"tmr", &openpic_tmr_ops_be,
1266 OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
1267 {"src", &openpic_src_ops_be,
1268 OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
1269 {"cpu", &openpic_cpu_ops_be,
1270 OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
1271 {NULL}
1272 };
1273 static const struct mem_reg list_fsl[] = {
1274 {"msi", &openpic_msi_ops_be,
1275 OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
1276 {"summary", &openpic_summary_ops_be,
1277 OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
1278 {NULL}
1279 };
1280 1333
1281 memory_region_init(&opp->mem, "openpic", 0x40000); 1334 return mr->write(opp, addr - mr->start_addr, val);
1335 }
1336
1337 return -ENXIO;
1338}
1339
1340static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
1341 int len, void *ptr)
1342{
1343 struct openpic *opp = container_of(this, struct openpic, mmio);
1344 int ret;
1345 union {
1346 u32 val;
1347 u8 bytes[4];
1348 } u;
1349
1350 if (addr & (len - 1)) {
1351 pr_debug("%s: bad alignment %llx/%d\n",
1352 __func__, addr, len);
1353 return -EINVAL;
1354 }
1355
1356 spin_lock_irq(&opp->lock);
1357 ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
1358 spin_unlock_irq(&opp->lock);
1359
1360 /*
1361 * Technically only 32-bit accesses are allowed, but be nice to
1362 * people dumping registers a byte at a time -- it works in real
1363 * hardware (reads only, not writes).
1364 */
1365 if (len == 4) {
1366 *(u32 *)ptr = u.val;
1367 pr_debug("%s: addr %llx ret %d len 4 val %x\n",
1368 __func__, addr, ret, u.val);
1369 } else if (len == 1) {
1370 *(u8 *)ptr = u.bytes[addr & 3];
1371 pr_debug("%s: addr %llx ret %d len 1 val %x\n",
1372 __func__, addr, ret, u.bytes[addr & 3]);
1373 } else {
1374 pr_debug("%s: bad length %d\n", __func__, len);
1375 return -EINVAL;
1376 }
1377
1378 return ret;
1379}
1380
1381static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
1382 int len, const void *ptr)
1383{
1384 struct openpic *opp = container_of(this, struct openpic, mmio);
1385 int ret;
1386
1387 if (len != 4) {
1388 pr_debug("%s: bad length %d\n", __func__, len);
1389 return -EOPNOTSUPP;
1390 }
1391 if (addr & 3) {
1392 pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len);
1393 return -EOPNOTSUPP;
1394 }
1395
1396 spin_lock_irq(&opp->lock);
1397 ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
1398 *(const u32 *)ptr);
1399 spin_unlock_irq(&opp->lock);
1400
1401 pr_debug("%s: addr %llx ret %d val %x\n",
1402 __func__, addr, ret, *(const u32 *)ptr);
1403
1404 return ret;
1405}
1406
1407static void kvm_mpic_dtor(struct kvm_io_device *this)
1408{
1409 struct openpic *opp = container_of(this, struct openpic, mmio);
1410
1411 opp->mmio_mapped = false;
1412}
1413
1414static const struct kvm_io_device_ops mpic_mmio_ops = {
1415 .read = kvm_mpic_read,
1416 .write = kvm_mpic_write,
1417 .destructor = kvm_mpic_dtor,
1418};
1419
1420static void map_mmio(struct openpic *opp)
1421{
1422 BUG_ON(opp->mmio_mapped);
1423 opp->mmio_mapped = true;
1424
1425 kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);
1426
1427 kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
1428 opp->reg_base, OPENPIC_REG_SIZE,
1429 &opp->mmio);
1430}
1431
1432static void unmap_mmio(struct openpic *opp)
1433{
1434 BUG_ON(opp->mmio_mapped);
1435 opp->mmio_mapped = false;
1436
1437 kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
1438}
1439
1440static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr)
1441{
1442 u64 base;
1443
1444 if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64)))
1445 return -EFAULT;
1446
1447 if (base & 0x3ffff) {
1448 pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n",
1449 __func__, base);
1450 return -EINVAL;
1451 }
1452
1453 if (base == opp->reg_base)
1454 return 0;
1455
1456 mutex_lock(&opp->kvm->slots_lock);
1457
1458 unmap_mmio(opp);
1459 opp->reg_base = base;
1460
1461 pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n",
1462 __func__, base);
1463
1464 if (base == 0)
1465 goto out;
1466
1467 map_mmio(opp);
1468
1469 mutex_unlock(&opp->kvm->slots_lock);
1470out:
1471 return 0;
1472}
1473
1474#define ATTR_SET 0
1475#define ATTR_GET 1
1476
1477static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
1478{
1479 int ret;
1480
1481 if (addr & 3)
1482 return -ENXIO;
1483
1484 spin_lock_irq(&opp->lock);
1485
1486 if (type == ATTR_SET)
1487 ret = kvm_mpic_write_internal(opp, addr, *val);
1488 else
1489 ret = kvm_mpic_read_internal(opp, addr, val);
1490
1491 spin_unlock_irq(&opp->lock);
1492
1493 pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);
1494
1495 return ret;
1496}
1497
1498static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1499{
1500 struct openpic *opp = dev->private;
1501 u32 attr32;
1502
1503 switch (attr->group) {
1504 case KVM_DEV_MPIC_GRP_MISC:
1505 switch (attr->attr) {
1506 case KVM_DEV_MPIC_BASE_ADDR:
1507 return set_base_addr(opp, attr);
1508 }
1509
1510 break;
1511
1512 case KVM_DEV_MPIC_GRP_REGISTER:
1513 if (get_user(attr32, (u32 __user *)(long)attr->addr))
1514 return -EFAULT;
1515
1516 return access_reg(opp, attr->attr, &attr32, ATTR_SET);
1517
1518 case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
1519 if (attr->attr > MAX_SRC)
1520 return -EINVAL;
1521
1522 if (get_user(attr32, (u32 __user *)(long)attr->addr))
1523 return -EFAULT;
1524
1525 if (attr32 != 0 && attr32 != 1)
1526 return -EINVAL;
1527
1528 spin_lock_irq(&opp->lock);
1529 openpic_set_irq(opp, attr->attr, attr32);
1530 spin_unlock_irq(&opp->lock);
1531 return 0;
1532 }
1533
1534 return -ENXIO;
1535}
1536
1537static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1538{
1539 struct openpic *opp = dev->private;
1540 u64 attr64;
1541 u32 attr32;
1542 int ret;
1543
1544 switch (attr->group) {
1545 case KVM_DEV_MPIC_GRP_MISC:
1546 switch (attr->attr) {
1547 case KVM_DEV_MPIC_BASE_ADDR:
1548 mutex_lock(&opp->kvm->slots_lock);
1549 attr64 = opp->reg_base;
1550 mutex_unlock(&opp->kvm->slots_lock);
1551
1552 if (copy_to_user((u64 __user *)(long)attr->addr,
1553 &attr64, sizeof(u64)))
1554 return -EFAULT;
1555
1556 return 0;
1557 }
1558
1559 break;
1560
1561 case KVM_DEV_MPIC_GRP_REGISTER:
1562 ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
1563 if (ret)
1564 return ret;
1565
1566 if (put_user(attr32, (u32 __user *)(long)attr->addr))
1567 return -EFAULT;
1568
1569 return 0;
1570
1571 case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
1572 if (attr->attr > MAX_SRC)
1573 return -EINVAL;
1574
1575 spin_lock_irq(&opp->lock);
1576 attr32 = opp->src[attr->attr].pending;
1577 spin_unlock_irq(&opp->lock);
1578
1579 if (put_user(attr32, (u32 __user *)(long)attr->addr))
1580 return -EFAULT;
1581
1582 return 0;
1583 }
1584
1585 return -ENXIO;
1586}
1587
1588static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1589{
1590 switch (attr->group) {
1591 case KVM_DEV_MPIC_GRP_MISC:
1592 switch (attr->attr) {
1593 case KVM_DEV_MPIC_BASE_ADDR:
1594 return 0;
1595 }
1596
1597 break;
1598
1599 case KVM_DEV_MPIC_GRP_REGISTER:
1600 return 0;
1601
1602 case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
1603 if (attr->attr > MAX_SRC)
1604 break;
1605
1606 return 0;
1607 }
1608
1609 return -ENXIO;
1610}
1611
1612static void mpic_destroy(struct kvm_device *dev)
1613{
1614 struct openpic *opp = dev->private;
1615
1616 if (opp->mmio_mapped) {
1617 /*
1618 * Normally we get unmapped by kvm_io_bus_destroy(),
1619 * which happens before the VCPUs release their references.
1620 *
1621 * Thus, we should only get here if no VCPUs took a reference
1622 * to us in the first place.
1623 */
1624 WARN_ON(opp->nb_cpus != 0);
1625 unmap_mmio(opp);
1626 }
1627
1628 kfree(opp);
1629}
1630
1631static int mpic_create(struct kvm_device *dev, u32 type)
1632{
1633 struct openpic *opp;
1634 int ret;
1635
1636 opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
1637 if (!opp)
1638 return -ENOMEM;
1639
1640 dev->private = opp;
1641 opp->kvm = dev->kvm;
1642 opp->dev = dev;
1643 opp->model = type;
1644 spin_lock_init(&opp->lock);
1645
1646 INIT_LIST_HEAD(&opp->mmio_regions);
1647 list_add(&openpic_gbl_mmio.list, &opp->mmio_regions);
1648 list_add(&openpic_tmr_mmio.list, &opp->mmio_regions);
1649 list_add(&openpic_src_mmio.list, &opp->mmio_regions);
1650 list_add(&openpic_cpu_mmio.list, &opp->mmio_regions);
1282 1651
1283 switch (opp->model) { 1652 switch (opp->model) {
1284 case OPENPIC_MODEL_FSL_MPIC_20: 1653 case KVM_DEV_TYPE_FSL_MPIC_20:
1285 default:
1286 opp->fsl = &fsl_mpic_20; 1654 opp->fsl = &fsl_mpic_20;
1287 opp->brr1 = 0x00400200; 1655 opp->brr1 = 0x00400200;
1288 opp->flags |= OPENPIC_FLAG_IDR_CRIT; 1656 opp->flags |= OPENPIC_FLAG_IDR_CRIT;
@@ -1290,12 +1658,10 @@ static int openpic_init(SysBusDevice *dev)
1290 opp->mpic_mode_mask = GCR_MODE_MIXED; 1658 opp->mpic_mode_mask = GCR_MODE_MIXED;
1291 1659
1292 fsl_common_init(opp); 1660 fsl_common_init(opp);
1293 map_list(opp, list_be, &list_count);
1294 map_list(opp, list_fsl, &list_count);
1295 1661
1296 break; 1662 break;
1297 1663
1298 case OPENPIC_MODEL_FSL_MPIC_42: 1664 case KVM_DEV_TYPE_FSL_MPIC_42:
1299 opp->fsl = &fsl_mpic_42; 1665 opp->fsl = &fsl_mpic_42;
1300 opp->brr1 = 0x00400402; 1666 opp->brr1 = 0x00400402;
1301 opp->flags |= OPENPIC_FLAG_ILR; 1667 opp->flags |= OPENPIC_FLAG_ILR;
@@ -1303,11 +1669,27 @@ static int openpic_init(SysBusDevice *dev)
1303 opp->mpic_mode_mask = GCR_MODE_PROXY; 1669 opp->mpic_mode_mask = GCR_MODE_PROXY;
1304 1670
1305 fsl_common_init(opp); 1671 fsl_common_init(opp);
1306 map_list(opp, list_be, &list_count);
1307 map_list(opp, list_fsl, &list_count);
1308 1672
1309 break; 1673 break;
1674
1675 default:
1676 ret = -ENODEV;
1677 goto err;
1310 } 1678 }
1311 1679
1680 openpic_reset(opp);
1312 return 0; 1681 return 0;
1682
1683err:
1684 kfree(opp);
1685 return ret;
1313} 1686}
1687
1688struct kvm_device_ops kvm_mpic_ops = {
1689 .name = "kvm-mpic",
1690 .create = mpic_create,
1691 .destroy = mpic_destroy,
1692 .set_attr = mpic_set_attr,
1693 .get_attr = mpic_get_attr,
1694 .has_attr = mpic_has_attr,
1695};
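To put the new kvm_device_ops to use, a userspace sketch might program the controller through the attribute groups handled above: KVM_DEV_MPIC_GRP_MISC / KVM_DEV_MPIC_BASE_ADDR to map the register block (set_base_addr() rejects addresses that are not 256 KiB aligned) and KVM_DEV_MPIC_GRP_IRQ_ACTIVE to drive an interrupt source. setup_mpic(), the base address handling and IRQ number 5 are illustrative assumptions; the attribute constants are expected to come from the uapi header added alongside this patch.

/* Illustrative: mpic_fd is the fd returned by KVM_CREATE_DEVICE. */
static int setup_mpic(int mpic_fd, __u64 base)
{
	__u32 level = 1;
	struct kvm_device_attr addr_attr = {
		.group = KVM_DEV_MPIC_GRP_MISC,
		.attr  = KVM_DEV_MPIC_BASE_ADDR,
		.addr  = (__u64)(unsigned long)&base,	/* 256 KiB aligned */
	};
	struct kvm_device_attr irq_attr = {
		.group = KVM_DEV_MPIC_GRP_IRQ_ACTIVE,
		.attr  = 5,				/* source number */
		.addr  = (__u64)(unsigned long)&level,	/* 1 = assert, 0 = deassert */
	};

	if (ioctl(mpic_fd, KVM_SET_DEVICE_ATTR, &addr_attr) < 0)
		return -1;

	return ioctl(mpic_fd, KVM_SET_DEVICE_ATTR, &irq_attr);
}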
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6b8108624851..88d69cf1f953 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -317,6 +317,7 @@ int kvm_dev_ioctl_check_extension(long ext)
317 case KVM_CAP_ENABLE_CAP: 317 case KVM_CAP_ENABLE_CAP:
318 case KVM_CAP_ONE_REG: 318 case KVM_CAP_ONE_REG:
319 case KVM_CAP_IOEVENTFD: 319 case KVM_CAP_IOEVENTFD:
320 case KVM_CAP_DEVICE_CTRL:
320 r = 1; 321 r = 1;
321 break; 322 break;
322#ifndef CONFIG_KVM_BOOK3S_64_HV 323#ifndef CONFIG_KVM_BOOK3S_64_HV
@@ -762,7 +763,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
762 break; 763 break;
763 case KVM_CAP_PPC_EPR: 764 case KVM_CAP_PPC_EPR:
764 r = 0; 765 r = 0;
765 vcpu->arch.epr_enabled = cap->args[0]; 766 if (cap->args[0])
767 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
768 else
769 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
766 break; 770 break;
767#ifdef CONFIG_BOOKE 771#ifdef CONFIG_BOOKE
768 case KVM_CAP_PPC_BOOKE_WATCHDOG: 772 case KVM_CAP_PPC_BOOKE_WATCHDOG:
@@ -908,6 +912,7 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
908long kvm_arch_vm_ioctl(struct file *filp, 912long kvm_arch_vm_ioctl(struct file *filp,
909 unsigned int ioctl, unsigned long arg) 913 unsigned int ioctl, unsigned long arg)
910{ 914{
915 struct kvm *kvm __maybe_unused = filp->private_data;
911 void __user *argp = (void __user *)arg; 916 void __user *argp = (void __user *)arg;
912 long r; 917 long r;
913 918
@@ -926,7 +931,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
926#ifdef CONFIG_PPC_BOOK3S_64 931#ifdef CONFIG_PPC_BOOK3S_64
927 case KVM_CREATE_SPAPR_TCE: { 932 case KVM_CREATE_SPAPR_TCE: {
928 struct kvm_create_spapr_tce create_tce; 933 struct kvm_create_spapr_tce create_tce;
929 struct kvm *kvm = filp->private_data;
930 934
931 r = -EFAULT; 935 r = -EFAULT;
932 if (copy_from_user(&create_tce, argp, sizeof(create_tce))) 936 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
@@ -938,7 +942,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
938 942
939#ifdef CONFIG_KVM_BOOK3S_64_HV 943#ifdef CONFIG_KVM_BOOK3S_64_HV
940 case KVM_ALLOCATE_RMA: { 944 case KVM_ALLOCATE_RMA: {
941 struct kvm *kvm = filp->private_data;
942 struct kvm_allocate_rma rma; 945 struct kvm_allocate_rma rma;
943 946
944 r = kvm_vm_ioctl_allocate_rma(kvm, &rma); 947 r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
@@ -948,7 +951,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
948 } 951 }
949 952
950 case KVM_PPC_ALLOCATE_HTAB: { 953 case KVM_PPC_ALLOCATE_HTAB: {
951 struct kvm *kvm = filp->private_data;
952 u32 htab_order; 954 u32 htab_order;
953 955
954 r = -EFAULT; 956 r = -EFAULT;
@@ -965,7 +967,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
965 } 967 }
966 968
967 case KVM_PPC_GET_HTAB_FD: { 969 case KVM_PPC_GET_HTAB_FD: {
968 struct kvm *kvm = filp->private_data;
969 struct kvm_get_htab_fd ghf; 970 struct kvm_get_htab_fd ghf;
970 971
971 r = -EFAULT; 972 r = -EFAULT;
@@ -978,7 +979,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
978 979
979#ifdef CONFIG_PPC_BOOK3S_64 980#ifdef CONFIG_PPC_BOOK3S_64
980 case KVM_PPC_GET_SMMU_INFO: { 981 case KVM_PPC_GET_SMMU_INFO: {
981 struct kvm *kvm = filp->private_data;
982 struct kvm_ppc_smmu_info info; 982 struct kvm_ppc_smmu_info info;
983 983
984 memset(&info, 0, sizeof(info)); 984 memset(&info, 0, sizeof(info));