-rw-r--r--  arch/arm/kernel/asm-offsets.c    | 14
-rw-r--r--  arch/arm/kvm/interrupts_head.S   | 26
-rw-r--r--  arch/arm64/kernel/asm-offsets.c  | 14
-rw-r--r--  arch/arm64/kvm/hyp.S             | 26
-rw-r--r--  include/kvm/arm_vgic.h           | 20
-rw-r--r--  virt/kvm/arm/vgic.c              | 56
6 files changed, 81 insertions, 75 deletions
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 85598b5d1efd..713e807621d2 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -182,13 +182,13 @@ int main(void)
   DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
 #ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
-  DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
-  DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
-  DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
-  DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
-  DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
-  DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+  DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+  DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+  DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+  DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+  DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+  DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+  DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
   DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
 #ifdef CONFIG_KVM_ARM_TIMER
   DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
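For context, the DEFINE()/offsetof() pairs above are how asm-offsets.c exposes C struct layouts as assembler-visible constants; after this patch the VGIC_V2_CPU_* values resolve through the nested vgic_v2 member, so the hyp assembly only needs the symbol rename. Below is a minimal user-space sketch of the same idea, using simplified stand-in structs rather than the kernel's real definitions or its DEFINE() macro.

/*
 * Stand-alone illustration only: simplified stand-in structs, not the
 * kernel's real definitions. It shows that the VGIC_V2_CPU_* constants now
 * include the offset of the nested vgic_v2 member inside struct vgic_cpu,
 * while VGIC_CPU_NR_LR still refers to a field outside the union.
 */
#include <stddef.h>
#include <stdio.h>

#define VGIC_MAX_LRS 64				/* stand-in value */

struct vgic_v2_cpu_if {
	unsigned int vgic_hcr;
	unsigned int vgic_vmcr;
	unsigned int vgic_misr;
	unsigned int vgic_eisr[2];
	unsigned int vgic_elrsr[2];
	unsigned int vgic_apr;
	unsigned int vgic_lr[VGIC_MAX_LRS];
};

struct vgic_cpu {
	int nr_lr;
	union {
		struct vgic_v2_cpu_if vgic_v2;	/* GICv2 state, as in this patch */
	};
};

int main(void)
{
	/* Analogue of the DEFINE() lines: emit symbol/offset pairs. */
	printf("VGIC_V2_CPU_HCR %zu\n", offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
	printf("VGIC_V2_CPU_LR  %zu\n", offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
	printf("VGIC_CPU_NR_LR  %zu\n", offsetof(struct vgic_cpu, nr_lr));
	return 0;
}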
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 76af93025574..e4eaf30205c5 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -421,14 +421,14 @@ vcpu .req r0 @ vcpu pointer always in r0
 	ldr	r9, [r2, #GICH_ELRSR1]
 	ldr	r10, [r2, #GICH_APR]

-	str	r3, [r11, #VGIC_CPU_HCR]
-	str	r4, [r11, #VGIC_CPU_VMCR]
-	str	r5, [r11, #VGIC_CPU_MISR]
-	str	r6, [r11, #VGIC_CPU_EISR]
-	str	r7, [r11, #(VGIC_CPU_EISR + 4)]
-	str	r8, [r11, #VGIC_CPU_ELRSR]
-	str	r9, [r11, #(VGIC_CPU_ELRSR + 4)]
-	str	r10, [r11, #VGIC_CPU_APR]
+	str	r3, [r11, #VGIC_V2_CPU_HCR]
+	str	r4, [r11, #VGIC_V2_CPU_VMCR]
+	str	r5, [r11, #VGIC_V2_CPU_MISR]
+	str	r6, [r11, #VGIC_V2_CPU_EISR]
+	str	r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
+	str	r8, [r11, #VGIC_V2_CPU_ELRSR]
+	str	r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+	str	r10, [r11, #VGIC_V2_CPU_APR]

 	/* Clear GICH_HCR */
 	mov	r5, #0
@@ -436,7 +436,7 @@ vcpu .req r0 @ vcpu pointer always in r0

 	/* Save list registers */
 	add	r2, r2, #GICH_LR0
-	add	r3, r11, #VGIC_CPU_LR
+	add	r3, r11, #VGIC_V2_CPU_LR
 	ldr	r4, [r11, #VGIC_CPU_NR_LR]
 1:	ldr	r6, [r2], #4
 	str	r6, [r3], #4
@@ -463,9 +463,9 @@ vcpu .req r0 @ vcpu pointer always in r0
 	add	r11, vcpu, #VCPU_VGIC_CPU

 	/* We only restore a minimal set of registers */
-	ldr	r3, [r11, #VGIC_CPU_HCR]
-	ldr	r4, [r11, #VGIC_CPU_VMCR]
-	ldr	r8, [r11, #VGIC_CPU_APR]
+	ldr	r3, [r11, #VGIC_V2_CPU_HCR]
+	ldr	r4, [r11, #VGIC_V2_CPU_VMCR]
+	ldr	r8, [r11, #VGIC_V2_CPU_APR]

 	str	r3, [r2, #GICH_HCR]
 	str	r4, [r2, #GICH_VMCR]
@@ -473,7 +473,7 @@ vcpu .req r0 @ vcpu pointer always in r0

 	/* Restore list registers */
 	add	r2, r2, #GICH_LR0
-	add	r3, r11, #VGIC_CPU_LR
+	add	r3, r11, #VGIC_V2_CPU_LR
 	ldr	r4, [r11, #VGIC_CPU_NR_LR]
 1:	ldr	r6, [r3], #4
 	str	r6, [r2], #4
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 646f888387cd..20fd4887aab6 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -129,13 +129,13 @@ int main(void)
   DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
   DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
   DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
-  DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
-  DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
-  DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
-  DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
-  DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
-  DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+  DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+  DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+  DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+  DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+  DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+  DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+  DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
   DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
   DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
   DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index b0d1512acf08..877d82a134bc 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -412,14 +412,14 @@ CPU_BE( rev w9, w9 )
 CPU_BE(	rev	w10, w10 )
 CPU_BE(	rev	w11, w11 )

-	str	w4, [x3, #VGIC_CPU_HCR]
-	str	w5, [x3, #VGIC_CPU_VMCR]
-	str	w6, [x3, #VGIC_CPU_MISR]
-	str	w7, [x3, #VGIC_CPU_EISR]
-	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
-	str	w9, [x3, #VGIC_CPU_ELRSR]
-	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
-	str	w11, [x3, #VGIC_CPU_APR]
+	str	w4, [x3, #VGIC_V2_CPU_HCR]
+	str	w5, [x3, #VGIC_V2_CPU_VMCR]
+	str	w6, [x3, #VGIC_V2_CPU_MISR]
+	str	w7, [x3, #VGIC_V2_CPU_EISR]
+	str	w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
+	str	w9, [x3, #VGIC_V2_CPU_ELRSR]
+	str	w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+	str	w11, [x3, #VGIC_V2_CPU_APR]

 	/* Clear GICH_HCR */
 	str	wzr, [x2, #GICH_HCR]
@@ -427,7 +427,7 @@ CPU_BE( rev w11, w11 )
 	/* Save list registers */
 	add	x2, x2, #GICH_LR0
 	ldr	w4, [x3, #VGIC_CPU_NR_LR]
-	add	x3, x3, #VGIC_CPU_LR
+	add	x3, x3, #VGIC_V2_CPU_LR
 1:	ldr	w5, [x2], #4
 CPU_BE(	rev	w5, w5 )
 	str	w5, [x3], #4
@@ -452,9 +452,9 @@ CPU_BE( rev w5, w5 )
 	add	x3, x0, #VCPU_VGIC_CPU

 	/* We only restore a minimal set of registers */
-	ldr	w4, [x3, #VGIC_CPU_HCR]
-	ldr	w5, [x3, #VGIC_CPU_VMCR]
-	ldr	w6, [x3, #VGIC_CPU_APR]
+	ldr	w4, [x3, #VGIC_V2_CPU_HCR]
+	ldr	w5, [x3, #VGIC_V2_CPU_VMCR]
+	ldr	w6, [x3, #VGIC_V2_CPU_APR]
 CPU_BE(	rev	w4, w4 )
 CPU_BE(	rev	w5, w5 )
 CPU_BE(	rev	w6, w6 )
@@ -466,7 +466,7 @@ CPU_BE( rev w6, w6 )
 	/* Restore list registers */
 	add	x2, x2, #GICH_LR0
 	ldr	w4, [x3, #VGIC_CPU_NR_LR]
-	add	x3, x3, #VGIC_CPU_LR
+	add	x3, x3, #VGIC_V2_CPU_LR
 1:	ldr	w5, [x3], #4
 CPU_BE(	rev	w5, w5 )
 	str	w5, [x2], #4
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index f27000f55a83..f738e5a69ee9 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -110,6 +110,16 @@ struct vgic_dist {
 #endif
 };

+struct vgic_v2_cpu_if {
+	u32 vgic_hcr;
+	u32 vgic_vmcr;
+	u32 vgic_misr;	/* Saved only */
+	u32 vgic_eisr[2];	/* Saved only */
+	u32 vgic_elrsr[2];	/* Saved only */
+	u32 vgic_apr;
+	u32 vgic_lr[VGIC_MAX_LRS];
+};
+
 struct vgic_cpu {
 #ifdef CONFIG_KVM_ARM_VGIC
 	/* per IRQ to LR mapping */
@@ -126,13 +136,9 @@ struct vgic_cpu {
 	int nr_lr;

 	/* CPU vif control registers for world switch */
-	u32 vgic_hcr;
-	u32 vgic_vmcr;
-	u32 vgic_misr;	/* Saved only */
-	u32 vgic_eisr[2];	/* Saved only */
-	u32 vgic_elrsr[2];	/* Saved only */
-	u32 vgic_apr;
-	u32 vgic_lr[VGIC_MAX_LRS];
+	union {
+		struct vgic_v2_cpu_if vgic_v2;
+	};
 #endif
 };

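The union added above has a single member for now. The likely intent (this patch itself does not say so, so treat this as an assumption) is that another GIC CPU-interface flavour can later be added as a sibling member without growing every vcpu and without disturbing code that already spells out .vgic_v2. A rough header-style illustration follows; struct vgic_v3_cpu_if and its contents are purely hypothetical placeholders, not part of this patch.

/*
 * Illustration only: struct vgic_v3_cpu_if is a hypothetical placeholder.
 * Only one CPU-interface register file is ever live for a given vcpu, so a
 * union keeps the footprint of struct vgic_cpu the same whichever flavour
 * ends up being used, and v2-specific code names the .vgic_v2 member
 * explicitly, as the vgic.c hunks below do.
 */
struct vgic_v3_cpu_if {			/* hypothetical future sibling */
	u64 vgic_lr[VGIC_MAX_LRS];
	/* ... */
};

struct vgic_cpu {
	/* ... existing fields as in the hunk above ... */
	union {
		struct vgic_v2_cpu_if vgic_v2;
		struct vgic_v3_cpu_if vgic_v3;	/* hypothetical */
	};
};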
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 56ff9bebb577..0ba1ab0721fd 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -601,7 +601,7 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
 static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
 {
 	clear_bit(lr_nr, vgic_cpu->lr_used);
-	vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
+	vgic_cpu->vgic_v2.vgic_lr[lr_nr] &= ~GICH_LR_STATE;
 	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
 }

@@ -626,7 +626,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 	u32 *lr;

 	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
-		lr = &vgic_cpu->vgic_lr[i];
+		lr = &vgic_cpu->vgic_v2.vgic_lr[i];
 		irq = LR_IRQID(*lr);
 		source_cpu = LR_CPUID(*lr);

@@ -1007,7 +1007,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 	int lr;

 	for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
-		int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+		int irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;

 		if (!vgic_irq_is_enabled(vcpu, irq)) {
 			vgic_retire_lr(lr, irq, vgic_cpu);
@@ -1037,11 +1037,11 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)

 	/* Do we have an active interrupt for the same CPUID? */
 	if (lr != LR_EMPTY &&
-	    (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
+	    (LR_CPUID(vgic_cpu->vgic_v2.vgic_lr[lr]) == sgi_source_id)) {
 		kvm_debug("LR%d piggyback for IRQ%d %x\n",
-			  lr, irq, vgic_cpu->vgic_lr[lr]);
+			  lr, irq, vgic_cpu->vgic_v2.vgic_lr[lr]);
 		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
-		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
+		vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_PENDING_BIT;
 		return true;
 	}

@@ -1052,12 +1052,12 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 		return false;

 	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
-	vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
+	vgic_cpu->vgic_v2.vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
 	vgic_cpu->vgic_irq_lr_map[irq] = lr;
 	set_bit(lr, vgic_cpu->lr_used);

 	if (!vgic_irq_is_edge(vcpu, irq))
-		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+		vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_EOI;

 	return true;
 }
@@ -1155,9 +1155,9 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)

 epilog:
 	if (overflow) {
-		vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
+		vgic_cpu->vgic_v2.vgic_hcr |= GICH_HCR_UIE;
 	} else {
-		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+		vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
 		/*
 		 * We're about to run this VCPU, and we've consumed
 		 * everything the distributor had in store for
@@ -1173,21 +1173,21 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	bool level_pending = false;

-	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
+	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_v2.vgic_misr);

-	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
+	if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_EOI) {
 		/*
 		 * Some level interrupts have been EOIed. Clear their
 		 * active bit.
 		 */
 		int lr, irq;

-		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
+		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
 				 vgic_cpu->nr_lr) {
-			irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+			irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;

 			vgic_irq_clear_active(vcpu, irq);
-			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
+			vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_EOI;

 			/* Any additional pending interrupt? */
 			if (vgic_dist_irq_is_pending(vcpu, irq)) {
@@ -1201,13 +1201,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			 * Despite being EOIed, the LR may not have
 			 * been marked as empty.
 			 */
-			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
-			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
+			set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr);
+			vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
 		}
 	}

-	if (vgic_cpu->vgic_misr & GICH_MISR_U)
-		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+	if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_U)
+		vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;

 	return level_pending;
 }
@@ -1226,21 +1226,21 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	level_pending = vgic_process_maintenance(vcpu);

 	/* Clear mappings for empty LRs */
-	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
+	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
 			 vgic_cpu->nr_lr) {
 		int irq;

 		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
 			continue;

-		irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+		irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;

 		BUG_ON(irq >= VGIC_NR_IRQS);
 		vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
 	}

 	/* Check if we still have something up our sleeve... */
-	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
+	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
 				      vgic_cpu->nr_lr);
 	if (level_pending || pending < vgic_cpu->nr_lr)
 		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
@@ -1436,10 +1436,10 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * points to their reset values. Anything else resets to zero
 	 * anyway.
 	 */
-	vgic_cpu->vgic_vmcr = 0;
+	vgic_cpu->vgic_v2.vgic_vmcr = 0;

 	vgic_cpu->nr_lr = vgic_nr_lr;
-	vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
+	vgic_cpu->vgic_v2.vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */

 	return 0;
 }
@@ -1746,15 +1746,15 @@ static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
 	}

 	if (!mmio->is_write) {
-		reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
+		reg = (vgic_cpu->vgic_v2.vgic_vmcr & mask) >> shift;
 		mmio_data_write(mmio, ~0, reg);
 	} else {
 		reg = mmio_data_read(mmio, ~0);
 		reg = (reg << shift) & mask;
-		if (reg != (vgic_cpu->vgic_vmcr & mask))
+		if (reg != (vgic_cpu->vgic_v2.vgic_vmcr & mask))
 			updated = true;
-		vgic_cpu->vgic_vmcr &= ~mask;
-		vgic_cpu->vgic_vmcr |= reg;
+		vgic_cpu->vgic_v2.vgic_vmcr &= ~mask;
+		vgic_cpu->vgic_v2.vgic_vmcr |= reg;
 	}
 	return updated;
 }
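All of the vgic.c hunks above follow the same mechanical pattern: every access to the old flat fields becomes an access through the new vgic_v2 member. One way follow-up work could avoid sprinkling .vgic_v2 through common code is to hide the indirection behind small accessors; the helpers below are an illustrative assumption only, not something this patch adds.

/*
 * Illustrative assumption: vgic_get_lr()/vgic_set_lr() are not part of this
 * patch. They simply wrap the .vgic_v2 indirection so common vgic code could
 * stay GIC-version agnostic if another CPU-interface flavour were added to
 * the union later.
 */
static inline u32 vgic_get_lr(const struct vgic_cpu *vgic_cpu, int lr)
{
	return vgic_cpu->vgic_v2.vgic_lr[lr];
}

static inline void vgic_set_lr(struct vgic_cpu *vgic_cpu, int lr, u32 val)
{
	vgic_cpu->vgic_v2.vgic_lr[lr] = val;
}

/* For example, the body of vgic_retire_lr() above could then read: */
/*	vgic_set_lr(vgic_cpu, lr_nr, vgic_get_lr(vgic_cpu, lr_nr) & ~GICH_LR_STATE); */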