author     Linus Torvalds <torvalds@linux-foundation.org>  2017-06-11 14:07:25 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-06-11 14:07:25 -0400
commit     9d0eb4624601ac978b9e89be4aeadbd51ab2c830 (patch)
tree       ac1d331cff77291d1bbb4ee2f7b41078c72c8b05
parent     5faab9e0f03c4eef97886b45436015e107f79f5f (diff)
parent     9bc1f09f6fa76fdf31eb7d6a4a4df43574725f93 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "Bug fixes (ARM, s390, x86)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: async_pf: avoid async pf injection when in guest mode
  KVM: cpuid: Fix read/write out-of-bounds vulnerability in cpuid emulation
  arm: KVM: Allow unaligned accesses at HYP
  arm64: KVM: Allow unaligned accesses at EL2
  arm64: KVM: Preserve RES1 bits in SCTLR_EL2
  KVM: arm/arm64: Handle possible NULL stage2 pud when ageing pages
  KVM: nVMX: Fix exception injection
  kvm: async_pf: fix rcu_irq_enter() with irqs enabled
  KVM: arm/arm64: vgic-v3: Fix nr_pre_bits bitfield extraction
  KVM: s390: fix ais handling vs cpu model
  KVM: arm/arm64: Fix isues with GICv2 on GICv3 migration
 arch/arm/kvm/init.S                |  5
 arch/arm64/include/asm/sysreg.h    |  4
 arch/arm64/kvm/hyp-init.S          | 11
 arch/arm64/kvm/vgic-sys-reg-v3.c   | 10
 arch/s390/include/asm/kvm_host.h   |  1
 arch/s390/kvm/interrupt.c          |  4
 arch/s390/kvm/kvm-s390.c           |  2
 arch/x86/kernel/kvm.c              |  2
 arch/x86/kvm/cpuid.c               | 20
 arch/x86/kvm/mmu.c                 |  7
 arch/x86/kvm/mmu.h                 |  1
 arch/x86/kvm/vmx.c                 |  2
 arch/x86/kvm/x86.c                 |  3
 include/linux/irqchip/arm-gic-v3.h |  4
 include/linux/irqchip/arm-gic.h    | 28
 virt/kvm/arm/hyp/vgic-v3-sr.c      |  2
 virt/kvm/arm/mmu.c                 |  3
 virt/kvm/arm/vgic/vgic-mmio-v2.c   | 16
 virt/kvm/arm/vgic/vgic-v2.c        | 28
 virt/kvm/arm/vgic/vgic-v3.c        | 47
 virt/kvm/arm/vgic/vgic.h           | 12
 21 files changed, 153 insertions(+), 59 deletions(-)
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 570ed4a9c261..5386528665b5 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -104,7 +104,6 @@ __do_hyp_init:
 	@ - Write permission implies XN: disabled
 	@ - Instruction cache: enabled
 	@ - Data/Unified cache: enabled
-	@ - Memory alignment checks: enabled
 	@ - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
-ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)	)
-THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)	)
+ARM(	ldr	r2, =(HSCTLR_M)	)
+THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)	)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 15c142ce991c..b4d13d9267ff 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+			 (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 839425c24b1c..3f9615582377 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -106,10 +106,13 @@ __do_hyp_init:
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
 
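Note: the new init value is a compile-time constant — the EL2 RES1 bits OR'd with the default flags, minus the alignment-check (A) bit. A minimal C sketch of that bit arithmetic follows; the SCTLR_ELx_C, SCTLR_ELx_SA, and SCTLR_ELx_I bit positions are assumptions mirrored from the same header, not part of this diff.

#include <stdint.h>
#include <stdio.h>

/* Values mirrored from arch/arm64/include/asm/sysreg.h (this commit);
 * C, SA and I bit positions are assumptions for illustration. */
#define SCTLR_ELx_M	(1u << 0)
#define SCTLR_ELx_A	(1u << 1)
#define SCTLR_ELx_C	(1u << 2)
#define SCTLR_ELx_SA	(1u << 3)
#define SCTLR_ELx_I	(1u << 12)
#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
			 SCTLR_ELx_SA | SCTLR_ELx_I)
#define SCTLR_EL2_RES1	((1u << 4) | (1u << 5) | (1u << 11) | (1u << 16) | \
			 (1u << 18) | (1u << 22) | (1u << 23) | \
			 (1u << 28) | (1u << 29))

int main(void)
{
	/* What the `ldr x4, =(...)` above loads: all RES1 bits plus the
	 * default flags, with the alignment-check (A) bit dropped. */
	uint32_t sctlr = SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A);

	printf("SCTLR_EL2 init = %#x (A bit set: %d)\n",
	       (unsigned)sctlr, !!(sctlr & SCTLR_ELx_A));
	return 0;
}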
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 79f37e37d367..6260b69e5622 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
 		vgic_set_vmcr(vcpu, &vmcr);
 	} else {
 		val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
 		p->regval = val;
 	}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		p->regval = 0;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
 		if (p->is_write) {
 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
 				    ICC_BPR1_EL1_SHIFT;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 426614a882a9..65d07ac34647 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -541,7 +541,6 @@ struct kvm_s390_float_interrupt {
 	struct mutex ais_lock;
 	u8 simm;
 	u8 nimm;
-	int ais_enabled;
 };
 
 struct kvm_hw_wp_info_arch {
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index caf15c8a8948..2d120fef7d90 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2160,7 +2160,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_ais_req req;
 	int ret = 0;
 
-	if (!fi->ais_enabled)
+	if (!test_kvm_facility(kvm, 72))
 		return -ENOTSUPP;
 
 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
 	};
 	int ret = 0;
 
-	if (!fi->ais_enabled || !adapter->suppressible)
+	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
 		return kvm_s390_inject_vm(kvm, &s390int);
 
 	mutex_lock(&fi->ais_lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 689ac48361c6..f28e2e776931 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -558,7 +558,6 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	} else {
 		set_kvm_facility(kvm->arch.model.fac_mask, 72);
 		set_kvm_facility(kvm->arch.model.fac_list, 72);
-		kvm->arch.float_int.ais_enabled = 1;
 		r = 0;
 	}
 	mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	mutex_init(&kvm->arch.float_int.ais_lock);
 	kvm->arch.float_int.simm = 0;
 	kvm->arch.float_int.nimm = 0;
-	kvm->arch.float_int.ais_enabled = 0;
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index da5c09789984..43e10d6fdbed 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
 			 */
 			rcu_irq_exit();
 			native_safe_halt();
-			rcu_irq_enter();
 			local_irq_disable();
+			rcu_irq_enter();
 		}
 	}
 	if (!n.halted)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a181ae76c71c..59ca2eea522c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -780,18 +780,20 @@ out:
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-	int j, nent = vcpu->arch.cpuid_nent;
+	struct kvm_cpuid_entry2 *ej;
+	int j = i;
+	int nent = vcpu->arch.cpuid_nent;
 
 	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 	/* when no next entry is found, the current entry[i] is reselected */
-	for (j = i + 1; ; j = (j + 1) % nent) {
-		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-		if (ej->function == e->function) {
-			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-			return j;
-		}
-	}
-	return 0; /* silence gcc, even though control never reaches here */
+	do {
+		j = (j + 1) % nent;
+		ej = &vcpu->arch.cpuid_entries[j];
+	} while (ej->function != e->function);
+
+	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+	return j;
 }
 
 /* find an entry with matching function, matching index (if needed), and that
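Note: the out-of-bounds read fixed above is easy to reproduce in isolation. With i == nent - 1, the old loop's first probe was at index j = i + 1 == nent, one past the end of the array, because the % nent wrap only applied from the second iteration on. The do-while applies the modulo before every access. A standalone sketch of the two index sequences (illustrative values, not KVM code):

#include <stdio.h>

int main(void)
{
	int nent = 4, i = 3, j;

	/* Old loop: first index probed is i + 1 == nent -> out of bounds. */
	j = i + 1;
	printf("old first probe: %d (valid range 0..%d)\n", j, nent - 1);

	/* New loop: wrap before the access, so every probe is in range. */
	j = i;
	j = (j + 1) % nent;
	printf("new first probe: %d\n", j);	/* prints 0 */
	return 0;
}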
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5d3376f67794..cb8225969255 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3698,12 +3698,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
 	if (unlikely(!lapic_in_kernel(vcpu) ||
 		     kvm_event_needs_reinjection(vcpu)))
 		return false;
 
+	if (is_guest_mode(vcpu))
+		return false;
+
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
@@ -3719,7 +3722,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	if (!prefault && can_do_async_pf(vcpu)) {
+	if (!prefault && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 27975807cc64..330bf3a811fb 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -76,6 +76,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9b4b5d6dcd34..ca5d2b93385c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2425,7 +2425,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
 	if (!(vmcs12->exception_bitmap & (1u << nr)))
 		return 0;
 
-	nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
 			  vmcs_read32(VM_EXIT_INTR_INFO),
 			  vmcs_readl(EXIT_QUALIFICATION));
 	return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a2cd0997343c..87d3cb901935 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8607,8 +8607,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 		return true;
 	else
-		return !kvm_event_needs_reinjection(vcpu) &&
-			kvm_x86_ops->interrupt_allowed(vcpu);
+		return kvm_can_do_async_pf(vcpu);
 }
 
 void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index fffb91202bc9..1fa293a37f4a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -417,6 +417,10 @@
 #define ICH_HCR_EN			(1 << 0)
 #define ICH_HCR_UIE			(1 << 1)
 
+#define ICH_VMCR_ACK_CTL_SHIFT		2
+#define ICH_VMCR_ACK_CTL_MASK		(1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT		3
+#define ICH_VMCR_FIQ_EN_MASK		(1 << ICH_VMCR_FIQ_EN_SHIFT)
 #define ICH_VMCR_CBPR_SHIFT		4
 #define ICH_VMCR_CBPR_MASK		(1 << ICH_VMCR_CBPR_SHIFT)
 #define ICH_VMCR_EOIM_SHIFT		9
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index dc30f3d057eb..d3453ee072fc 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -25,7 +25,18 @@
 #define GICC_ENABLE			0x1
 #define GICC_INT_PRI_THRESHOLD		0xf0
 
-#define GIC_CPU_CTRL_EOImodeNS		(1 << 9)
+#define GIC_CPU_CTRL_EnableGrp0_SHIFT	0
+#define GIC_CPU_CTRL_EnableGrp0		(1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
+#define GIC_CPU_CTRL_EnableGrp1_SHIFT	1
+#define GIC_CPU_CTRL_EnableGrp1		(1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
+#define GIC_CPU_CTRL_AckCtl_SHIFT	2
+#define GIC_CPU_CTRL_AckCtl		(1 << GIC_CPU_CTRL_AckCtl_SHIFT)
+#define GIC_CPU_CTRL_FIQEn_SHIFT	3
+#define GIC_CPU_CTRL_FIQEn		(1 << GIC_CPU_CTRL_FIQEn_SHIFT)
+#define GIC_CPU_CTRL_CBPR_SHIFT		4
+#define GIC_CPU_CTRL_CBPR		(1 << GIC_CPU_CTRL_CBPR_SHIFT)
+#define GIC_CPU_CTRL_EOImodeNS_SHIFT	9
+#define GIC_CPU_CTRL_EOImodeNS		(1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
 
 #define GICC_IAR_INT_ID_MASK		0x3ff
 #define GICC_INT_SPURIOUS		1023
@@ -84,8 +95,19 @@
 #define GICH_LR_EOI			(1 << 19)
 #define GICH_LR_HW			(1 << 31)
 
-#define GICH_VMCR_CTRL_SHIFT		0
-#define GICH_VMCR_CTRL_MASK		(0x21f << GICH_VMCR_CTRL_SHIFT)
+#define GICH_VMCR_ENABLE_GRP0_SHIFT	0
+#define GICH_VMCR_ENABLE_GRP0_MASK	(1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
+#define GICH_VMCR_ENABLE_GRP1_SHIFT	1
+#define GICH_VMCR_ENABLE_GRP1_MASK	(1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
+#define GICH_VMCR_ACK_CTL_SHIFT		2
+#define GICH_VMCR_ACK_CTL_MASK		(1 << GICH_VMCR_ACK_CTL_SHIFT)
+#define GICH_VMCR_FIQ_EN_SHIFT		3
+#define GICH_VMCR_FIQ_EN_MASK		(1 << GICH_VMCR_FIQ_EN_SHIFT)
+#define GICH_VMCR_CBPR_SHIFT		4
+#define GICH_VMCR_CBPR_MASK		(1 << GICH_VMCR_CBPR_SHIFT)
+#define GICH_VMCR_EOI_MODE_SHIFT	9
+#define GICH_VMCR_EOI_MODE_MASK		(1 << GICH_VMCR_EOI_MODE_SHIFT)
+
 #define GICH_VMCR_PRIMASK_SHIFT		27
 #define GICH_VMCR_PRIMASK_MASK		(0x1f << GICH_VMCR_PRIMASK_SHIFT)
 #define GICH_VMCR_BINPOINT_SHIFT	21
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 32c3295929b0..87940364570b 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -22,7 +22,7 @@
 #include <asm/kvm_hyp.h>
 
 #define vtr_to_max_lr_idx(v)		((v) & 0xf)
-#define vtr_to_nr_pre_bits(v)		(((u32)(v) >> 26) + 1)
+#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
 
 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
 {
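Note: ICH_VTR_EL2 packs PREbits into bits [28:26] with further fields above it (PRIbits sits at [31:29]). The old macro shifted but never masked, so those upper bits leaked into the result. A quick host-side sketch of the difference, using an illustrative VTR value:

#include <stdint.h>
#include <stdio.h>

#define vtr_to_nr_pre_bits_old(v)	(((uint32_t)(v) >> 26) + 1)
#define vtr_to_nr_pre_bits_new(v)	((((uint32_t)(v) >> 26) & 7) + 1)

int main(void)
{
	/* Illustrative VTR: PRIbits (bits 31:29) = 4, PREbits (bits 28:26) = 4 */
	uint32_t vtr = (4u << 29) | (4u << 26);

	printf("old: %u\n", (unsigned)vtr_to_nr_pre_bits_old(vtr)); /* 37: PRIbits leak in */
	printf("new: %u\n", (unsigned)vtr_to_nr_pre_bits_new(vtr)); /* 5 */
	return 0;
}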
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index a2d63247d1bb..e2e5effba2a9 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -879,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
+
 	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 0a4283ed9aa7..63e0bbdcddcc 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
 
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		val = vmcr.ctlr;
+		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
+		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
+		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
+		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
+		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
+		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		vmcr.ctlr = val;
+		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
+		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
+		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
+		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
+		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
+		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 504b4bd0d651..e4187e52bb26 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -177,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 	u32 vmcr;
 
-	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
+		GICH_VMCR_ENABLE_GRP0_MASK;
+	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
+		GICH_VMCR_ENABLE_GRP1_MASK;
+	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
+		GICH_VMCR_ACK_CTL_MASK;
+	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
+		GICH_VMCR_FIQ_EN_MASK;
+	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
+		GICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
+		GICH_VMCR_EOI_MODE_MASK;
 	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
 		GICH_VMCR_ALIAS_BINPOINT_MASK;
 	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
@@ -195,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 
 	vmcr = cpu_if->vgic_vmcr;
 
-	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
-			GICH_VMCR_CTRL_SHIFT;
+	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
+			GICH_VMCR_ENABLE_GRP0_SHIFT;
+	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
+			GICH_VMCR_ENABLE_GRP1_SHIFT;
+	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
+			GICH_VMCR_ACK_CTL_SHIFT;
+	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
+			GICH_VMCR_FIQ_EN_SHIFT;
+	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
+			GICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
+			GICH_VMCR_EOI_MODE_SHIFT;
+
 	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
 			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
 	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
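Note: with each field now carried separately in struct vgic_vmcr, set/get become a straight pack/unpack pair, and the natural sanity check is that they round-trip. A minimal host-side sketch under that assumption — the GICH_VMCR_* values mirror the arm-gic.h hunk above, and only three of the new single-bit fields are exercised:

#include <assert.h>
#include <stdint.h>

/* Mirrored from include/linux/irqchip/arm-gic.h (this commit). */
#define GICH_VMCR_ENABLE_GRP0_SHIFT	0
#define GICH_VMCR_ENABLE_GRP0_MASK	(1u << GICH_VMCR_ENABLE_GRP0_SHIFT)
#define GICH_VMCR_ACK_CTL_SHIFT		2
#define GICH_VMCR_ACK_CTL_MASK		(1u << GICH_VMCR_ACK_CTL_SHIFT)
#define GICH_VMCR_EOI_MODE_SHIFT	9
#define GICH_VMCR_EOI_MODE_MASK		(1u << GICH_VMCR_EOI_MODE_SHIFT)

struct vmcr_bits { uint32_t grpen0, ackctl, eoim; };

static uint32_t pack(const struct vmcr_bits *b)
{
	uint32_t v;

	v  = (b->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) & GICH_VMCR_ENABLE_GRP0_MASK;
	v |= (b->ackctl << GICH_VMCR_ACK_CTL_SHIFT) & GICH_VMCR_ACK_CTL_MASK;
	v |= (b->eoim << GICH_VMCR_EOI_MODE_SHIFT) & GICH_VMCR_EOI_MODE_MASK;
	return v;
}

static void unpack(uint32_t v, struct vmcr_bits *b)
{
	b->grpen0 = (v & GICH_VMCR_ENABLE_GRP0_MASK) >> GICH_VMCR_ENABLE_GRP0_SHIFT;
	b->ackctl = (v & GICH_VMCR_ACK_CTL_MASK) >> GICH_VMCR_ACK_CTL_SHIFT;
	b->eoim   = (v & GICH_VMCR_EOI_MODE_MASK) >> GICH_VMCR_EOI_MODE_SHIFT;
}

int main(void)
{
	struct vmcr_bits in = { 1, 1, 1 }, out;

	unpack(pack(&in), &out);
	assert(in.grpen0 == out.grpen0 && in.ackctl == out.ackctl &&
	       in.eoim == out.eoim);
	return 0;
}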
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 6fe3f003636a..030248e669f6 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -159,15 +159,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
-		ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
-	vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
+			ICH_VMCR_ACK_CTL_MASK;
+		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
+			ICH_VMCR_FIQ_EN_MASK;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcr = ICH_VMCR_FIQ_EN_MASK;
+	}
+
+	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
 	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
 	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
 	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
@@ -180,17 +189,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
 	vmcr = cpu_if->vgic_vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
-		ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-	vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
+			ICH_VMCR_ACK_CTL_SHIFT;
+		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
+			ICH_VMCR_FIQ_EN_SHIFT;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcrp->fiqen = 1;
+		vmcrp->ackctl = 0;
+	}
+
+	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
 	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
 	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
 	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index da83e4caa272..bba7fa22a7f7 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
  * registers regardless of the hardware backed GIC used.
  */
 struct vgic_vmcr {
-	u32 ctlr;
+	u32 grpen0;
+	u32 grpen1;
+
+	u32 ackctl;
+	u32 fiqen;
+	u32 cbpr;
+	u32 eoim;
+
 	u32 abpr;
 	u32 bpr;
 	u32 pmr;  /* Priority mask field in the GICC_PMR and
 		   * ICC_PMR_EL1 priority field format */
-	/* Below member variable are valid only for GICv3 */
-	u32 grpen0;
-	u32 grpen1;
 };
 
 struct vgic_reg_attr {