-rw-r--r--  Documentation/virtual/kvm/api.txt    |   4
-rw-r--r--  arch/arm/include/asm/kvm_arm.h       |   1
-rw-r--r--  arch/arm/include/asm/kvm_host.h      |   1
-rw-r--r--  arch/arm/kvm/arm.c                   |   3
-rw-r--r--  arch/arm/kvm/handle_exit.c           |  19
-rw-r--r--  arch/arm64/include/asm/kvm_host.h    |   3
-rw-r--r--  arch/arm64/kvm/handle_exit.c         |  19
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c             |  64
-rw-r--r--  arch/x86/kvm/vmx.c                   |  30
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h   |   2
-rw-r--r--  virt/kvm/arm/vgic/vgic-its.c         | 109
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c        |  32
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c          |   5
13 files changed, 191 insertions(+), 101 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 069450938b79..3c248f772ae6 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -951,6 +951,10 @@ This ioctl allows the user to create or modify a guest physical memory
 slot. When changing an existing slot, it may be moved in the guest
 physical memory space, or its flags may be modified. It may not be
 resized. Slots may not overlap in guest physical address space.
+Bits 0-15 of "slot" specifies the slot id and this value should be
+less than the maximum number of user memory slots supported per VM.
+The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
+if this capability is supported by the architecture.
 
 If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
 specifies the address space which is being modified. They must be
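The paragraph added above documents the KVM_CAP_NR_MEMSLOTS query. A minimal user-space sketch of that query, assuming a host with /dev/kvm and omitting error handling for brevity:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	/* KVM_CHECK_EXTENSION returns 0 if the capability is absent,
	 * otherwise the number of user memory slots supported per VM. */
	int nr_slots = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);

	if (nr_slots > 0)
		printf("user memory slots per VM: %d\n", nr_slots);
	else
		printf("KVM_CAP_NR_MEMSLOTS not reported by this kernel\n");
	return 0;
}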
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index e22089fb44dc..a3f0b3d50089 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -209,6 +209,7 @@
 #define HSR_EC_IABT_HYP	(0x21)
 #define HSR_EC_DABT	(0x24)
 #define HSR_EC_DABT_HYP	(0x25)
+#define HSR_EC_MAX	(0x3f)
 
 #define HSR_WFI_IS_WFE		(_AC(1, UL) << 0)
 
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index cc495d799c67..31ee468ce667 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -30,7 +30,6 @@
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
 #define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HAVE_ONE_REG
 #define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c9a2103faeb9..96dba7cd8be7 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
+	case KVM_CAP_NR_MEMSLOTS:
+		r = KVM_USER_MEM_SLOTS;
+		break;
 	case KVM_CAP_MSI_DEVID:
 		if (!kvm)
 			r = -EINVAL;
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e40d1955e35..96af65a30d78 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
+		      hsr);
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... HSR_EC_MAX]	= kvm_handle_unknown_ec,
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x\n",
-			(unsigned int)kvm_vcpu_get_hsr(vcpu));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
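The removal of the runtime BUG() check relies on a GNU C designated range initializer: [0 ... HSR_EC_MAX] = kvm_handle_unknown_ec pre-fills every table slot with the fallback handler, and the named entries that follow override individual exception classes, so a lookup can never hit a NULL pointer. A small standalone sketch of the same pattern (hypothetical names, not the kernel table):

#include <stdio.h>

#define EC_MAX 0x3f

typedef int (*handler_fn)(void);

static int handle_unknown(void) { return -1; }
static int handle_wfi(void)     { return 0; }

/* Every slot defaults to handle_unknown; named entries override it. */
static handler_fn handlers[] = {
	[0 ... EC_MAX] = handle_unknown,
	[0x01]         = handle_wfi,
};

int main(void)
{
	/* Any class, known or not, resolves to a valid function pointer. */
	printf("%d %d\n", handlers[0x01](), handlers[0x2a]());
	return 0;
}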
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f21fd3894370..e7705e7bb07b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,8 +30,7 @@
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 512
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HALT_POLL_NS_DEFAULT 500000
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 1bfe30dfbfe7..fa1b18e364fc 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
+		      hsr, esr_get_class_string(hsr));
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
 	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
 	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
 	u8 hsr_ec = ESR_ELx_EC(hsr);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
-			hsr, esr_get_class_string(hsr));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index e8e7ba2bc11f..9e1d2b75eecd 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -18,14 +18,62 @@
 #include <asm/kvm_hyp.h>
 #include <asm/tlbflush.h>
 
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+{
+	u64 val;
+
+	/*
+	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+	 * most TLB operations target EL2/EL0. In order to affect the
+	 * guest TLBs (EL1/EL0), we need to change one of these two
+	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+	 * let's flip TGE before executing the TLB operation.
+	 */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	val = read_sysreg(hcr_el2);
+	val &= ~HCR_TGE;
+	write_sysreg(val, hcr_el2);
+	isb();
+}
+
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+{
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+}
+
+static hyp_alternate_select(__tlb_switch_to_guest,
+			    __tlb_switch_to_guest_nvhe,
+			    __tlb_switch_to_guest_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+{
+	/*
+	 * We're done with the TLB operation, let's restore the host's
+	 * view of HCR_EL2.
+	 */
+	write_sysreg(0, vttbr_el2);
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+}
+
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+{
+	write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__tlb_switch_to_host,
+			    __tlb_switch_to_host_nvhe,
+			    __tlb_switch_to_host_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
 	/* Switch to requested VMID */
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
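At each former write_sysreg()/isb() pair the patch now calls the function returned by the hyp_alternate_select() selector, which resolves to either the nVHE or the VHE variant based on the ARM64_HAS_VIRT_HOST_EXTN capability; that is why the call sites read __tlb_switch_to_guest()(kvm). A rough user-space illustration of that calling convention only (plain function-pointer selection, not the kernel's alternatives patching):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the two TLB context-switch variants. */
static void switch_to_guest_nvhe(void) { puts("nVHE: program VTTBR_EL2, isb"); }
static void switch_to_guest_vhe(void)  { puts("VHE: program VTTBR_EL2, clear HCR_EL2.TGE, isb"); }

/* Pretend capability check; the kernel resolves this once via code patching. */
static bool has_vhe(void) { return true; }

/* Selector returns the chosen function, mirroring the ()() call style. */
static void (*tlb_switch_to_guest(void))(void)
{
	return has_vhe() ? switch_to_guest_vhe : switch_to_guest_nvhe;
}

int main(void)
{
	tlb_switch_to_guest()();
	return 0;
}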
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 283aa8601833..98e82ee1e699 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7258,9 +7258,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
 static int handle_vmclear(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 zero = 0;
 	gpa_t vmptr;
-	struct vmcs12 *vmcs12;
-	struct page *page;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -7271,22 +7270,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);
 
-	page = nested_get_page(vcpu, vmptr);
-	if (page == NULL) {
-		/*
-		 * For accurate processor emulation, VMCLEAR beyond available
-		 * physical memory should do nothing at all. However, it is
-		 * possible that a nested vmx bug, not a guest hypervisor bug,
-		 * resulted in this case, so let's shut down before doing any
-		 * more damage:
-		 */
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
-		return 1;
-	}
-	vmcs12 = kmap(page);
-	vmcs12->launch_state = 0;
-	kunmap(page);
-	nested_release_page(page);
+	kvm_vcpu_write_guest(vcpu,
+			vmptr + offsetof(struct vmcs12, launch_state),
+			&zero, sizeof(zero));
 
 	nested_free_vmcs02(vmx, vmptr);
 
@@ -9694,10 +9680,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 		return false;
 
 	page = nested_get_page(vcpu, vmcs12->msr_bitmap);
-	if (!page) {
-		WARN_ON(1);
+	if (!page)
 		return false;
-	}
 	msr_bitmap_l1 = (unsigned long *)kmap(page);
 
 	memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
@@ -11121,8 +11105,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
  */
 static void vmx_leave_nested(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu))
+	if (is_guest_mode(vcpu)) {
+		to_vmx(vcpu)->nested.nested_run_pending = 0;
 		nested_vmx_vmexit(vcpu, -1, 0, 0);
+	}
 	free_nested(to_vmx(vcpu));
 }
 
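The rewritten handle_vmclear() no longer maps the VMCS page through the kernel; it writes four zero bytes straight into guest memory at the launch_state field, whose position is computed with offsetof(). A hedged user-space analogue of that arithmetic (fake_vmcs12 and write_guest() are illustrative stand-ins, not KVM APIs):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for vmcs12; only the field offset matters here. */
struct fake_vmcs12 {
	uint32_t revision_id;
	uint32_t abort;
	uint32_t launch_state;
};

/* User-space analogue of kvm_vcpu_write_guest(): copy bytes at an offset. */
static void write_guest(uint8_t *guest_mem, size_t gpa, const void *data, size_t len)
{
	memcpy(guest_mem + gpa, data, len);
}

int main(void)
{
	uint8_t guest_mem[4096];
	size_t vmptr = 256;          /* pretend guest-physical address of the VMCS */
	uint32_t zero = 0;

	memset(guest_mem, 0xff, sizeof(guest_mem));
	/* Clear only launch_state, as the patched handle_vmclear() does. */
	write_guest(guest_mem, vmptr + offsetof(struct fake_vmcs12, launch_state),
		    &zero, sizeof(zero));

	printf("byte at launch_state offset: %d\n",
	       guest_mem[vmptr + offsetof(struct fake_vmcs12, launch_state)]);
	return 0;
}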
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 672cfef72fc8..97cbca19430d 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -373,6 +373,8 @@
 #define ICC_IGRPEN0_EL1_MASK		(1 << ICC_IGRPEN0_EL1_SHIFT)
 #define ICC_IGRPEN1_EL1_SHIFT		0
 #define ICC_IGRPEN1_EL1_MASK		(1 << ICC_IGRPEN1_EL1_SHIFT)
+#define ICC_SRE_EL1_DIB			(1U << 2)
+#define ICC_SRE_EL1_DFB			(1U << 1)
 #define ICC_SRE_EL1_SRE			(1U << 0)
 
 /*
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 571b64a01c50..8d1da1af4b09 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
-					     struct vgic_its *its,
-					     gpa_t addr, unsigned int len)
-{
-	u32 reg = 0;
-
-	mutex_lock(&its->cmd_lock);
-	if (its->creadr == its->cwriter)
-		reg |= GITS_CTLR_QUIESCENT;
-	if (its->enabled)
-		reg |= GITS_CTLR_ENABLE;
-	mutex_unlock(&its->cmd_lock);
-
-	return reg;
-}
-
-static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
-				     gpa_t addr, unsigned int len,
-				     unsigned long val)
-{
-	its->enabled = !!(val & GITS_CTLR_ENABLE);
-}
-
 static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
 					      struct vgic_its *its,
 					      gpa_t addr, unsigned int len)
@@ -1161,33 +1138,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
 #define ITS_CMD_SIZE			32
 #define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
 
-/*
- * By writing to CWRITER the guest announces new commands to be processed.
- * To avoid any races in the first place, we take the its_cmd lock, which
- * protects our ring buffer variables, so that there is only one user
- * per ITS handling commands at a given time.
- */
-static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
-					gpa_t addr, unsigned int len,
-					unsigned long val)
+/* Must be called with the cmd_lock held. */
+static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 {
 	gpa_t cbaser;
 	u64 cmd_buf[4];
-	u32 reg;
 
-	if (!its)
-		return;
-
-	mutex_lock(&its->cmd_lock);
-
-	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
-	reg = ITS_CMD_OFFSET(reg);
-	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
-		mutex_unlock(&its->cmd_lock);
+	/* Commands are only processed when the ITS is enabled. */
+	if (!its->enabled)
 		return;
-	}
 
-	its->cwriter = reg;
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
@@ -1207,6 +1167,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
 		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
 			its->creadr = 0;
 	}
+}
+
+/*
+ * By writing to CWRITER the guest announces new commands to be processed.
+ * To avoid any races in the first place, we take the its_cmd lock, which
+ * protects our ring buffer variables, so that there is only one user
+ * per ITS handling commands at a given time.
+ */
+static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
+					gpa_t addr, unsigned int len,
+					unsigned long val)
+{
+	u64 reg;
+
+	if (!its)
+		return;
+
+	mutex_lock(&its->cmd_lock);
+
+	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
+	reg = ITS_CMD_OFFSET(reg);
+	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
+		mutex_unlock(&its->cmd_lock);
+		return;
+	}
+	its->cwriter = reg;
+
+	vgic_its_process_commands(kvm, its);
 
 	mutex_unlock(&its->cmd_lock);
 }
@@ -1287,6 +1275,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
 	*regptr = reg;
 }
 
+static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
+					     struct vgic_its *its,
+					     gpa_t addr, unsigned int len)
+{
+	u32 reg = 0;
+
+	mutex_lock(&its->cmd_lock);
+	if (its->creadr == its->cwriter)
+		reg |= GITS_CTLR_QUIESCENT;
+	if (its->enabled)
+		reg |= GITS_CTLR_ENABLE;
+	mutex_unlock(&its->cmd_lock);
+
+	return reg;
+}
+
+static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
+				     gpa_t addr, unsigned int len,
+				     unsigned long val)
+{
+	mutex_lock(&its->cmd_lock);
+
+	its->enabled = !!(val & GITS_CTLR_ENABLE);
+
+	/*
+	 * Try to process any pending commands. This function bails out early
+	 * if the ITS is disabled or no commands have been queued.
+	 */
+	vgic_its_process_commands(kvm, its);
+
+	mutex_unlock(&its->cmd_lock);
+}
+
 #define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
 {								\
 	.reg_offset = off,					\
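The refactor splits command draining out of the CWRITER handler into vgic_its_process_commands(), which expects cmd_lock to be held and bails out while the ITS is disabled; both the CWRITER and the CTLR handlers now take the lock and call it, so commands queued while the ITS was disabled are processed as soon as it is enabled. A toy pthread sketch of that structure (illustrative names only, not the vgic code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static bool enabled;
static int cwriter, creadr;

static void process_commands(void)	/* caller holds cmd_lock */
{
	if (!enabled)
		return;
	while (creadr != cwriter) {
		printf("processing command %d\n", creadr);
		creadr++;
	}
}

static void write_cwriter(int val)	/* analogue of a GITS_CWRITER write */
{
	pthread_mutex_lock(&cmd_lock);
	cwriter = val;
	process_commands();
	pthread_mutex_unlock(&cmd_lock);
}

static void write_ctlr(bool enable)	/* analogue of a GITS_CTLR write */
{
	pthread_mutex_lock(&cmd_lock);
	enabled = enable;
	process_commands();		/* drain anything queued while disabled */
	pthread_mutex_unlock(&cmd_lock);
}

int main(void)
{
	write_cwriter(3);	/* queued, but still disabled: nothing runs */
	write_ctlr(true);	/* enabling now drains commands 0..2 */
	return 0;
}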
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 3654b4c835ef..2a5db1352722 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -180,21 +180,37 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				    bool new_active_state)
 {
+	struct kvm_vcpu *requester_vcpu;
 	spin_lock(&irq->irq_lock);
+
+	/*
+	 * The vcpu parameter here can mean multiple things depending on how
+	 * this function is called; when handling a trap from the kernel it
+	 * depends on the GIC version, and these functions are also called as
+	 * part of save/restore from userspace.
+	 *
+	 * Therefore, we have to figure out the requester in a reliable way.
+	 *
+	 * When accessing VGIC state from user space, the requester_vcpu is
+	 * NULL, which is fine, because we guarantee that no VCPUs are running
+	 * when accessing VGIC state from user space so irq->vcpu->cpu is
+	 * always -1.
+	 */
+	requester_vcpu = kvm_arm_get_running_vcpu();
+
 	/*
 	 * If this virtual IRQ was written into a list register, we
 	 * have to make sure the CPU that runs the VCPU thread has
-	 * synced back LR state to the struct vgic_irq. We can only
-	 * know this for sure, when either this irq is not assigned to
-	 * anyone's AP list anymore, or the VCPU thread is not
-	 * running on any CPUs.
+	 * synced back the LR state to the struct vgic_irq.
 	 *
-	 * In the opposite case, we know the VCPU thread may be on its
-	 * way back from the guest and still has to sync back this
-	 * IRQ, so we release and re-acquire the spin_lock to let the
-	 * other thread sync back the IRQ.
+	 * As long as the conditions below are true, we know the VCPU thread
+	 * may be on its way back from the guest (we kicked the VCPU thread in
+	 * vgic_change_active_prepare) and still has to sync back this IRQ,
+	 * so we release and re-acquire the spin_lock to let the other thread
+	 * sync back the IRQ.
 	 */
 	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
 	       irq->vcpu->cpu != -1) /* VCPU thread is running */
 		cond_resched_lock(&irq->irq_lock);
 
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index edc6ee2dc852..be0f4c3e0142 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -229,10 +229,13 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
 	/*
 	 * If we are emulating a GICv3, we do it in an non-GICv2-compatible
 	 * way, so we force SRE to 1 to demonstrate this to the guest.
+	 * Also, we don't support any form of IRQ/FIQ bypass.
 	 * This goes with the spec allowing the value to be RAO/WI.
 	 */
 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
+				     ICC_SRE_EL1_DFB |
+				     ICC_SRE_EL1_SRE);
 		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
 	} else {
 		vgic_v3->vgic_sre = 0;