author    Radim Krčmář <rkrcmar@redhat.com>  2016-09-29 10:01:51 -0400
committer Radim Krčmář <rkrcmar@redhat.com>  2016-09-29 10:01:51 -0400
commit    45ca877ad0519a02c22aaff2e2cdf333a1421a0a
tree      d7abbc767611379f878bf30b8c3b507d4e31982e
parent    c5a6d5f7faad8549bb5ff7e3e5792e33933c5b9f
parent    0099b7701f5296a758d9e6b945ec96f96847cc2f
Merge tag 'kvm-arm-for-v4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into next
KVM/ARM Changes for v4.9
- Various cleanups and removal of redundant code
- Two important fixes for VMs not using an in-kernel irqchip
- A few optimizations
- Handle SError exceptions and present them to guests if appropriate
- Proxying of GICV access at EL2 if guest mappings are unsafe
- GICv3 on AArch32 on ARMv8
- Preparations for GICv3 save/restore, including ABI docs
56 files changed, 1076 insertions(+), 562 deletions(-)
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virtual/kvm/devices/arm-vgic-its.txt
new file mode 100644
index 000000000000..6081a5b7fc1e
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/arm-vgic-its.txt
@@ -0,0 +1,38 @@
+ARM Virtual Interrupt Translation Service (ITS)
+===============================================
+
+Device types supported:
+  KVM_DEV_TYPE_ARM_VGIC_ITS    ARM Interrupt Translation Service Controller
+
+The ITS allows MSI(-X) interrupts to be injected into guests. This extension is
+optional. Creating a virtual ITS controller also requires a host GICv3 (see
+arm-vgic-v3.txt), but does not depend on having physical ITS controllers.
+
+There can be multiple ITS controllers per guest, each of them has to have
+a separate, non-overlapping MMIO region.
+
+
+Groups:
+  KVM_DEV_ARM_VGIC_GRP_ADDR
+  Attributes:
+    KVM_VGIC_ITS_ADDR_TYPE (rw, 64-bit)
+      Base address in the guest physical address space of the GICv3 ITS
+      control register frame.
+      This address needs to be 64K aligned and the region covers 128K.
+  Errors:
+    -E2BIG:  Address outside of addressable IPA range
+    -EINVAL: Incorrectly aligned address
+    -EEXIST: Address already configured
+    -EFAULT: Invalid user pointer for attr->addr.
+    -ENODEV: Incorrect attribute or the ITS is not supported.
+
+
+  KVM_DEV_ARM_VGIC_GRP_CTRL
+  Attributes:
+    KVM_DEV_ARM_VGIC_CTRL_INIT
+      request the initialization of the ITS, no additional parameter in
+      kvm_device_attr.addr.
+  Errors:
+    -ENXIO:  ITS not properly configured as required prior to setting
+             this attribute
+    -ENOMEM: Memory shortage when allocating ITS internal data
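
(Illustration, not part of the patch: a minimal userspace sketch of the flow
the arm-vgic-its.txt groups above describe. The vm_fd argument and the
0x08080000 base address are assumptions made for the example.)

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Create a virtual ITS, place its 128K control frame in guest memory,
     * then initialize it. Returns 0 on success, -1 with errno otherwise. */
    static int create_its(int vm_fd)
    {
            struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_ITS };
            __u64 its_base = 0x08080000;    /* hypothetical, 64K aligned */
            struct kvm_device_attr addr = {
                    .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
                    .attr  = KVM_VGIC_ITS_ADDR_TYPE,
                    .addr  = (__u64)(unsigned long)&its_base,
            };
            struct kvm_device_attr init = {
                    .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                    .attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
            };

            if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
                    return -1;
            if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &addr))
                    return -1;      /* -E2BIG/-EINVAL/-EEXIST/... as listed */
            return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &init);
    }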
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
new file mode 100644
index 000000000000..9348b3caccd7
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
@@ -0,0 +1,206 @@
+ARM Virtual Generic Interrupt Controller v3 and later (VGICv3)
+==============================================================
+
+
+Device types supported:
+  KVM_DEV_TYPE_ARM_VGIC_V3     ARM Generic Interrupt Controller v3.0
+
+Only one VGIC instance may be instantiated through this API. The created VGIC
+will act as the VM interrupt controller, requiring emulated user-space devices
+to inject interrupts to the VGIC instead of directly to CPUs. It is not
+possible to create both a GICv3 and GICv2 on the same VM.
+
+Creating a guest GICv3 device requires a host GICv3 as well.
+
+
+Groups:
+  KVM_DEV_ARM_VGIC_GRP_ADDR
+  Attributes:
+    KVM_VGIC_V3_ADDR_TYPE_DIST (rw, 64-bit)
+      Base address in the guest physical address space of the GICv3 distributor
+      register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
+      This address needs to be 64K aligned and the region covers 64 KByte.
+
+    KVM_VGIC_V3_ADDR_TYPE_REDIST (rw, 64-bit)
+      Base address in the guest physical address space of the GICv3
+      redistributor register mappings. There are two 64K pages for each
+      VCPU and all of the redistributor pages are contiguous.
+      Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
+      This address needs to be 64K aligned.
+  Errors:
+    -E2BIG:  Address outside of addressable IPA range
+    -EINVAL: Incorrectly aligned address
+    -EEXIST: Address already configured
+    -ENXIO:  The group or attribute is unknown/unsupported for this device
+             or hardware support is missing.
+    -EFAULT: Invalid user pointer for attr->addr.
+
+
+
+  KVM_DEV_ARM_VGIC_GRP_DIST_REGS
+  KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
+  Attributes:
+    The attr field of kvm_device_attr encodes two values:
+    bits:     | 63  ....  32 | 31  ....   0 |
+    values:   |     mpidr    |    offset    |
+
+    All distributor regs are (rw, 32-bit) and kvm_device_attr.addr points to a
+    __u32 value. 64-bit registers must be accessed by separately accessing the
+    lower and higher word.
+
+    Writes to read-only registers are ignored by the kernel.
+
+    KVM_DEV_ARM_VGIC_GRP_DIST_REGS accesses the main distributor registers.
+    KVM_DEV_ARM_VGIC_GRP_REDIST_REGS accesses the redistributor of the CPU
+    specified by the mpidr.
+
+    The offset is relative to the "[Re]Distributor base address" as defined
+    in the GICv3/4 specs. Getting or setting such a register has the same
+    effect as reading or writing the register on real hardware, except for the
+    following registers: GICD_STATUSR, GICR_STATUSR, GICD_ISPENDR,
+    GICR_ISPENDR0, GICD_ICPENDR, and GICR_ICPENDR0. These registers behave
+    differently when accessed via this interface compared to their
+    architecturally defined behavior to allow software a full view of the
+    VGIC's internal state.
+
+    The mpidr field is used to specify which redistributor is accessed. The
+    mpidr is ignored for the distributor.
+
+    The mpidr encoding is based on the affinity information in the
+    architecture defined MPIDR, and the field is encoded as follows:
+      | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
+      |    Aff3    |    Aff2    |    Aff1    |    Aff0    |
+
+    Note that distributor fields are not banked, but return the same value
+    regardless of the mpidr used to access the register.
+
+    The GICD_STATUSR and GICR_STATUSR registers are architecturally defined such
+    that a write of a clear bit has no effect, whereas a write with a set bit
+    clears that value. To allow userspace to freely set the values of these two
+    registers, setting the attributes with the register offsets for these two
+    registers simply sets the non-reserved bits to the value written.
+
+
+    Accesses (reads and writes) to the GICD_ISPENDR register region and
+    GICR_ISPENDR0 registers get/set the value of the latched pending state for
+    the interrupts.
+
+    This is identical to the value returned by a guest read from ISPENDR for an
+    edge triggered interrupt, but may differ for level triggered interrupts.
+    For edge triggered interrupts, once an interrupt becomes pending (whether
+    because of an edge detected on the input line or because of a guest write
+    to ISPENDR) this state is "latched", and only cleared when either the
+    interrupt is activated or when the guest writes to ICPENDR. A level
+    triggered interrupt may be pending either because the level input is held
+    high by a device, or because of a guest write to the ISPENDR register. Only
+    ISPENDR writes are latched; if the device lowers the line level then the
+    interrupt is no longer pending unless the guest also wrote to ISPENDR, and
+    conversely writes to ICPENDR or activations of the interrupt do not clear
+    the pending status if the line level is still being held high. (These
+    rules are documented in the GICv3 specification descriptions of the ICPENDR
+    and ISPENDR registers.) For a level triggered interrupt the value accessed
+    here is that of the latch which is set by ISPENDR and cleared by ICPENDR or
+    interrupt activation, whereas the value returned by a guest read from
+    ISPENDR is the logical OR of the latch value and the input line level.
+
+    Raw access to the latch state is provided to userspace so that it can save
+    and restore the entire GIC internal state (which is defined by the
+    combination of the current input line level and the latch state, and cannot
+    be deduced from purely the line level and the value of the ISPENDR
+    registers).
+
+    Accesses to GICD_ICPENDR register region and GICR_ICPENDR0 registers have
+    RAZ/WI semantics, meaning that reads always return 0 and writes are always
+    ignored.
+
+  Errors:
+    -ENXIO: Getting or setting this register is not yet supported
+    -EBUSY: One or more VCPUs are running
+
+
+  KVM_DEV_ARM_VGIC_CPU_SYSREGS
+  Attributes:
+    The attr field of kvm_device_attr encodes two values:
+    bits:     | 63  ....  32 | 31 .... 16 | 15 .... 0 |
+    values:   |     mpidr    |    RES     |   instr   |
+
+    The mpidr field encodes the CPU ID based on the affinity information in the
+    architecture defined MPIDR, and the field is encoded as follows:
+      | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
+      |    Aff3    |    Aff2    |    Aff1    |    Aff0    |
+
+    The instr field encodes the system register to access based on the fields
+    defined in the A64 instruction set encoding for system register access
+    (RES means the bits are reserved for future use and should be zero):
+
+      | 15 ... 14 | 13 ... 11 | 10 ... 7 | 6 ... 3 | 2 ... 0 |
+      |    Op0    |    Op1    |    CRn   |   CRm   |   Op2   |
+
+    All system regs accessed through this API are (rw, 64-bit) and
+    kvm_device_attr.addr points to a __u64 value.
+
+    KVM_DEV_ARM_VGIC_CPU_SYSREGS accesses the CPU interface registers for the
+    CPU specified by the mpidr field.
+
+  Errors:
+    -ENXIO: Getting or setting this register is not yet supported
+    -EBUSY: VCPU is running
+    -EINVAL: Invalid mpidr supplied
+
+
+  KVM_DEV_ARM_VGIC_GRP_NR_IRQS
+  Attributes:
+    A value describing the number of interrupts (SGI, PPI and SPI) for
+    this GIC instance, ranging from 64 to 1024, in increments of 32.
+
+    kvm_device_attr.addr points to a __u32 value.
+
+  Errors:
+    -EINVAL: Value set is out of the expected range
+    -EBUSY: Value has already been set.
+
+
+  KVM_DEV_ARM_VGIC_GRP_CTRL
+  Attributes:
+    KVM_DEV_ARM_VGIC_CTRL_INIT
+      request the initialization of the VGIC, no additional parameter in
+      kvm_device_attr.addr.
+  Errors:
+    -ENXIO:  VGIC not properly configured as required prior to calling
+             this attribute
+    -ENODEV: no online VCPU
+    -ENOMEM: memory shortage when allocating vgic internal data
+
+
+  KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO
+  Attributes:
+    The attr field of kvm_device_attr encodes the following values:
+    bits:     | 63  ....  32 | 31 .... 10 | 9 .... 0 |
+    values:   |     mpidr    |    info    |  vINTID  |
+
+    The vINTID specifies which set of IRQs is reported on.
+
+    The info field specifies which information userspace wants to get or set
+    using this interface. Currently we support the following info values:
+
+      VGIC_LEVEL_INFO_LINE_LEVEL:
+        Get/Set the input level of the IRQ line for a set of 32 contiguously
+        numbered interrupts.
+        vINTID must be a multiple of 32.
+
+        kvm_device_attr.addr points to a __u32 value which will contain a
+        bitmap where a set bit means the interrupt level is asserted.
+
+        Bit[n] indicates the status for interrupt vINTID + n.
+
+    SGIs and any interrupt with a higher ID than the number of interrupts
+    supported will be RAZ/WI. LPIs are always edge-triggered and are
+    therefore not supported by this interface.
+
+    PPIs are reported per VCPU as specified in the mpidr field, and SPIs are
+    reported with the same value regardless of the mpidr specified.
+
+    The mpidr field encodes the CPU ID based on the affinity information in the
+    architecture defined MPIDR, and the field is encoded as follows:
+      | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
+      |    Aff3    |    Aff2    |    Aff1    |    Aff0    |
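
(Illustration, not part of the patch: how the mpidr/offset packing for the
register groups above might be used from userspace. The vgic_fd argument is
an assumption for the example.)

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Read one 32-bit distributor register, e.g. GICD_TYPER at offset 0x8.
     * mpidr carries Aff3..Aff0 in its low 32 bits; it is ignored for
     * DIST_REGS but selects the target CPU for REDIST_REGS. */
    static int vgic_v3_get_dist_reg(int vgic_fd, __u32 mpidr, __u32 offset,
                                    __u32 *val)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                    .attr  = ((__u64)mpidr << 32) | offset, /* 63:32 | 31:0 */
                    .addr  = (__u64)(unsigned long)val,
            };

            return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
    }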
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 89182f80cc7f..76e61c883347 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -1,24 +1,19 @@
-ARM Virtual Generic Interrupt Controller (VGIC)
-===============================================
+ARM Virtual Generic Interrupt Controller v2 (VGIC)
+==================================================
 
 Device types supported:
   KVM_DEV_TYPE_ARM_VGIC_V2     ARM Generic Interrupt Controller v2.0
-  KVM_DEV_TYPE_ARM_VGIC_V3     ARM Generic Interrupt Controller v3.0
-  KVM_DEV_TYPE_ARM_VGIC_ITS    ARM Interrupt Translation Service Controller
 
-Only one VGIC instance of the V2/V3 types above may be instantiated through
-either this API or the legacy KVM_CREATE_IRQCHIP api.  The created VGIC will
-act as the VM interrupt controller, requiring emulated user-space devices to
-inject interrupts to the VGIC instead of directly to CPUs.
+Only one VGIC instance may be instantiated through either this API or the
+legacy KVM_CREATE_IRQCHIP API. The created VGIC will act as the VM interrupt
+controller, requiring emulated user-space devices to inject interrupts to the
+VGIC instead of directly to CPUs.
 
-Creating a guest GICv3 device requires a host GICv3 as well.
-GICv3 implementations with hardware compatibility support allow a guest GICv2
-as well.
+GICv3 implementations with hardware compatibility support allow creating a
+guest GICv2 through this interface. For information on creating a guest GICv3
+device and guest ITS devices, see arm-vgic-v3.txt. It is not possible to
+create both a GICv3 and GICv2 device on the same VM.
 
-Creating a virtual ITS controller requires a host GICv3 (but does not depend
-on having physical ITS controllers).
-There can be multiple ITS controllers per guest, each of them has to have
-a separate, non-overlapping MMIO region.
 
 Groups:
   KVM_DEV_ARM_VGIC_GRP_ADDR
@@ -32,26 +27,13 @@ Groups:
     Base address in the guest physical address space of the GIC virtual cpu
     interface register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2.
     This address needs to be 4K aligned and the region covers 4 KByte.
-
-  KVM_VGIC_V3_ADDR_TYPE_DIST (rw, 64-bit)
-    Base address in the guest physical address space of the GICv3 distributor
-    register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
-    This address needs to be 64K aligned and the region covers 64 KByte.
-
-  KVM_VGIC_V3_ADDR_TYPE_REDIST (rw, 64-bit)
-    Base address in the guest physical address space of the GICv3
-    redistributor register mappings. There are two 64K pages for each
-    VCPU and all of the redistributor pages are contiguous.
-    Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
-    This address needs to be 64K aligned.
-
-  KVM_VGIC_V3_ADDR_TYPE_ITS (rw, 64-bit)
-    Base address in the guest physical address space of the GICv3 ITS
-    control register frame. The ITS allows MSI(-X) interrupts to be
-    injected into guests. This extension is optional. If the kernel
-    does not support the ITS, the call returns -ENODEV.
-    Only valid for KVM_DEV_TYPE_ARM_VGIC_ITS.
-    This address needs to be 64K aligned and the region covers 128K.
+  Errors:
+    -E2BIG:  Address outside of addressable IPA range
+    -EINVAL: Incorrectly aligned address
+    -EEXIST: Address already configured
+    -ENXIO:  The group or attribute is unknown/unsupported for this device
+             or hardware support is missing.
+    -EFAULT: Invalid user pointer for attr->addr.
 
   KVM_DEV_ARM_VGIC_GRP_DIST_REGS
   Attributes:
diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt
index c04165868faf..02f50686c418 100644
--- a/Documentation/virtual/kvm/devices/vcpu.txt
+++ b/Documentation/virtual/kvm/devices/vcpu.txt
@@ -30,4 +30,6 @@ Returns: -ENODEV: PMUv3 not supported
                  attribute
          -EBUSY: PMUv3 already initialized
 
-Request the initialization of the PMUv3.
+Request the initialization of the PMUv3. This must be done after creating the
+in-kernel irqchip. Creating a PMU with a userspace irqchip is currently not
+supported.
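
(Illustration, not part of the patch: the ordering this paragraph requires,
sketched with the arm64 uAPI names. The vcpu_fd argument is an assumption.)

    /* Only after the in-kernel irqchip (VGIC) has been created: */
    struct kvm_device_attr pmu_init = {
            .group = KVM_ARM_VCPU_PMU_V3_CTRL,
            .attr  = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &pmu_init))
            perror("PMUv3 init"); /* e.g. EBUSY if already initialized */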
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index e08d15184056..1fee657d3827 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,9 +22,7 @@
 
 #include <linux/io.h>
 #include <asm/barrier.h>
-
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)  p15, Op1, %0, CRn, CRm, Op2
-#define __ACCESS_CP15_64(Op1, CRm)         p15, Op1, %Q0, %R0, CRm
+#include <asm/cp15.h>
 
 #define ICC_EOIR1          __ACCESS_CP15(c12, 0, c12, 1)
 #define ICC_DIR            __ACCESS_CP15(c12, 0, c11, 1)
@@ -98,65 +96,135 @@
 #define ICH_AP1R2          __AP1Rx(2)
 #define ICH_AP1R3          __AP1Rx(3)
 
+/* A32-to-A64 mappings used by VGIC save/restore */
+
+#define CPUIF_MAP(a32, a64)                     \
+static inline void write_ ## a64(u32 val)       \
+{                                               \
+        write_sysreg(val, a32);                 \
+}                                               \
+static inline u32 read_ ## a64(void)            \
+{                                               \
+        return read_sysreg(a32);                \
+}                                               \
+
+#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64)      \
+static inline void write_ ## a64(u64 val)       \
+{                                               \
+        write_sysreg(lower_32_bits(val), a32lo);\
+        write_sysreg(upper_32_bits(val), a32hi);\
+}                                               \
+static inline u64 read_ ## a64(void)            \
+{                                               \
+        u64 val = read_sysreg(a32lo);           \
+                                                \
+        val |= (u64)read_sysreg(a32hi) << 32;   \
+                                                \
+        return val;                             \
+}
+
+CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
+CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
+CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
+CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
+CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
+CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
+CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
+CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
+CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
+CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
+CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
+CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
+CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
+CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
+CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
+
+CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
+CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
+CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
+CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
+CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
+CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
+CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
+CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
+CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
+CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
+CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
+CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
+CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
+CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
+CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
+CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
+
+#define read_gicreg(r)                 read_##r()
+#define write_gicreg(v, r)             write_##r(v)
+
 /* Low-level accessors */
 
 static inline void gic_write_eoir(u32 irq)
 {
-        asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
+        write_sysreg(irq, ICC_EOIR1);
         isb();
 }
 
 static inline void gic_write_dir(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
+        write_sysreg(val, ICC_DIR);
         isb();
 }
 
 static inline u32 gic_read_iar(void)
 {
-        u32 irqstat;
+        u32 irqstat = read_sysreg(ICC_IAR1);
 
-        asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
         dsb(sy);
+
         return irqstat;
 }
 
 static inline void gic_write_pmr(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
+        write_sysreg(val, ICC_PMR);
 }
 
 static inline void gic_write_ctlr(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
+        write_sysreg(val, ICC_CTLR);
         isb();
 }
 
 static inline void gic_write_grpen1(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
+        write_sysreg(val, ICC_IGRPEN1);
         isb();
 }
 
 static inline void gic_write_sgi1r(u64 val)
 {
-        asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
+        write_sysreg(val, ICC_SGI1R);
 }
 
 static inline u32 gic_read_sre(void)
 {
-        u32 val;
-
-        asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
-        return val;
+        return read_sysreg(ICC_SRE);
 }
 
 static inline void gic_write_sre(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
+        write_sysreg(val, ICC_SRE);
         isb();
 }
 
+static inline void gic_write_bpr1(u32 val)
+{
+#if defined(__write_sysreg) && defined(ICC_BPR1)
+        write_sysreg(val, ICC_BPR1);
+#else
+        asm volatile("mcr " __stringify(ICC_BPR1) : : "r" (val));
+#endif
+}
+
 /*
  * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
  * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index c3f11524f10c..dbdbce1b3a72 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -49,6 +49,21 @@
 
 #ifdef CONFIG_CPU_CP15
 
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2)       \
+        "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm)              \
+        "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({            \
+        t __val;                                \
+        asm volatile(r " " c : "=r" (__val));   \
+        __val;                                  \
+})
+#define read_sysreg(...)               __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t)  asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...)           __write_sysreg(v, __VA_ARGS__)
+
 extern unsigned long cr_alignment;     /* defined in entry-armv.S */
 
 static inline unsigned long get_cr(void)
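
(Illustration, not part of the patch: how the relocated accessors compose.
Given the existing arch_gicv3.h definition

    #define ICC_SRE    __ACCESS_CP15(c12, 0, c12, 5)

a call such as

    u32 val = read_sysreg(ICC_SRE);

expands, after macro substitution, to roughly

    u32 val = ({ u32 __val;
                 asm volatile("mrc p15, 0, %0, c12, c12, 5" : "=r" (__val));
                 __val; });

__ACCESS_CP15 supplies the read/write mnemonics, the stringified operand
list, and the access type; read_sysreg()/write_sysreg() pick out the pieces
they need, so one register definition serves both directions.)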
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 1ee94c716a7f..e2d94c1b07b8 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -55,6 +55,7 @@
 
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)
 
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
         ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 58faff5f1eb2..d7ea6bcb29bf 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -21,6 +21,10 @@
 
 #include <asm/virt.h>
 
+#define ARM_EXIT_WITH_ABORT_BIT  31
+#define ARM_EXCEPTION_CODE(x)    ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
+#define ARM_ABORT_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
+
 #define ARM_EXCEPTION_RESET      0
 #define ARM_EXCEPTION_UNDEFINED  1
 #define ARM_EXCEPTION_SOFTWARE   2
@@ -68,6 +72,9 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 extern void __init_stage2_translation(void);
 
 extern void __kvm_hyp_reset(unsigned long);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
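
(Illustration, not part of the patch: how the new abort flag splits an exit
code. The handle_exit() change further down is the real consumer.)

    /* Suppose the guest exited with an IRQ and the hyp vectors also flagged
     * a pending external abort by setting bit 31 of the return code: */
    int exception_index = ARM_EXCEPTION_IRQ | (1U << ARM_EXIT_WITH_ABORT_BIT);

    ARM_ABORT_PENDING(exception_index);  /* 1 -> inject an abort first */
    ARM_EXCEPTION_CODE(exception_index); /* ARM_EXCEPTION_IRQ, the real reason */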
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index ee5328fc4b06..9a8a45aaf19a 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -40,18 +40,29 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
         *vcpu_reg(vcpu, reg_num) = val;
 }
 
-bool kvm_condition_valid(struct kvm_vcpu *vcpu);
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+        return kvm_condition_valid32(vcpu);
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+        kvm_skip_instr32(vcpu, is_wide_instr);
+}
+
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
         vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.hcr;
 }
@@ -61,7 +72,7 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
         vcpu->arch.hcr = hcr;
 }
 
-static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
         return 1;
 }
@@ -71,9 +82,9 @@ static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
         return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
 }
 
-static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-        return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
+        return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -93,11 +104,21 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
         return cpsr_mode > USR_MODE;
 }
 
-static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.fault.hsr;
 }
 
+static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+        u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+        if (hsr & HSR_CV)
+                return (hsr & HSR_COND) >> HSR_COND_SHIFT;
+
+        return -1;
+}
+
 static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.fault.hxfar;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 6ad21f04a922..2d19e02d03fd 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -39,7 +39,12 @@
 
 #include <kvm/arm_vgic.h>
 
+
+#ifdef CONFIG_ARM_GIC_V3
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+#else
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+#endif
 
 #define KVM_REQ_VCPU_EXIT      8
 
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 6eaff28f2ff3..343135ede5fa 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -20,28 +20,15 @@
 
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
+#include <asm/cp15.h>
 #include <asm/kvm_mmu.h>
 #include <asm/vfp.h>
 
 #define __hyp_text __section(.hyp.text) notrace
 
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)       \
-        "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm)              \
-        "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
 #define __ACCESS_VFP(CRn)                       \
         "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
 
-#define __write_sysreg(v, r, w, c, t)  asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...)           __write_sysreg(v, __VA_ARGS__)
-
-#define __read_sysreg(r, w, c, t) ({            \
-        t __val;                                \
-        asm volatile(r " " c : "=r" (__val));   \
-        __val;                                  \
-})
-#define read_sysreg(...)               __read_sysreg(__VA_ARGS__)
-
 #define write_special(v, r)                     \
         asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
 #define read_special(r) ({                      \
@@ -119,6 +106,9 @@ void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 void __sysreg_save_state(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
 
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
 void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
 void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
 static inline bool __vfp_enabled(void)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 3bb803d6814b..74a44727f8e1 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -63,37 +63,13 @@ void kvm_clear_hyp_idmap(void);
 static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
 {
         *pmd = new_pmd;
-        flush_pmd_entry(pmd);
+        dsb(ishst);
 }
 
 static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
         *pte = new_pte;
-        /*
-         * flush_pmd_entry just takes a void pointer and cleans the necessary
-         * cache entries, so we can reuse the function for ptes.
-         */
-        flush_pmd_entry(pte);
-}
-
-static inline void kvm_clean_pgd(pgd_t *pgd)
-{
-        clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
-}
-
-static inline void kvm_clean_pmd(pmd_t *pmd)
-{
-        clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
-}
-
-static inline void kvm_clean_pmd_entry(pmd_t *pmd)
-{
-        clean_pmd_entry(pmd);
-}
-
-static inline void kvm_clean_pte(pte_t *pte)
-{
-        clean_pte_table(pte);
+        dsb(ishst);
 }
 
 static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index a2b3eb313a25..b38c10c73579 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -84,6 +84,13 @@ struct kvm_regs {
 #define KVM_VGIC_V2_DIST_SIZE          0x1000
 #define KVM_VGIC_V2_CPU_SIZE           0x2000
 
+/* Supported VGICv3 address types  */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST     2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
+
+#define KVM_VGIC_V3_DIST_SIZE          SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE        (2 * SZ_64K)
+
 #define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_PSCI_0_2          1 /* CPU uses PSCI v0.2 */
 
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 10d77a66cad5..f19842ea5418 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -21,13 +21,16 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+obj-y += $(KVM)/arm/aarch32.o
 
 obj-y += $(KVM)/arm/vgic/vgic.o
 obj-y += $(KVM)/arm/vgic/vgic-init.o
 obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
 obj-y += $(KVM)/arm/vgic/vgic-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-v3.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
 obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
 obj-y += $(KVM)/irqchip.o
 obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c638935baad6..8a4a5637fab1 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1188,6 +1188,10 @@ static int init_common_resources(void)
                 return -ENOMEM;
         }
 
+        /* set size of VMID supported by CPU */
+        kvm_vmid_bits = kvm_get_vmid_bits();
+        kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
         return 0;
 }
 
@@ -1253,10 +1257,6 @@ static void teardown_hyp_mode(void)
 
 static int init_vhe_mode(void)
 {
-        /* set size of VMID supported by CPU */
-        kvm_vmid_bits = kvm_get_vmid_bits();
-        kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
         kvm_info("VHE mode initialized successfully\n");
         return 0;
 }
@@ -1340,10 +1340,6 @@ static int init_hyp_mode(void)
                 }
         }
 
-        /* set size of VMID supported by CPU */
-        kvm_vmid_bits = kvm_get_vmid_bits();
-        kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
         kvm_info("Hyp mode initialized successfully\n");
 
         return 0;
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 1bb2b79c01ff..3e5e4194ef86 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -228,6 +228,35 @@ bool access_vm_reg(struct kvm_vcpu *vcpu,
         return true;
 }
 
+static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+                           const struct coproc_params *p,
+                           const struct coproc_reg *r)
+{
+        u64 reg;
+
+        if (!p->is_write)
+                return read_from_write_only(vcpu, p);
+
+        reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
+        reg |= *vcpu_reg(vcpu, p->Rt1);
+
+        vgic_v3_dispatch_sgi(vcpu, reg);
+
+        return true;
+}
+
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+                           const struct coproc_params *p,
+                           const struct coproc_reg *r)
+{
+        if (p->is_write)
+                return ignore_write(vcpu, p);
+
+        *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+
+        return true;
+}
+
 /*
  * We could trap ID_DFR0 and tell the guest we don't support performance
  * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
@@ -361,10 +390,16 @@ static const struct coproc_reg cp15_regs[] = {
         { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
                         access_vm_reg, reset_unknown, c10_AMAIR1},
 
+        /* ICC_SGI1R */
+        { CRm64(12), Op1( 0), is64, access_gic_sgi},
+
         /* VBAR: swapped by interrupt.S. */
         { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
                         NULL, reset_val, c12_VBAR, 0x00000000 },
 
+        /* ICC_SRE */
+        { CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },
+
         /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
         { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
                         access_vm_reg, reset_val, c13_CID, 0x00000000 },
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index af93e3ffc9f3..0064b86a2c87 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -161,105 +161,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
         }
 }
 
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed. So let's re-implement the hardware, in
- * software!
- */
-bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-        unsigned long cpsr, cond, insn;
-
-        /*
-         * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-         * catch undefined instructions, and then we won't get past
-         * the arm_exit_handlers test anyway.
-         */
-        BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
-
-        /* Top two bits non-zero? Unconditional. */
-        if (kvm_vcpu_get_hsr(vcpu) >> 30)
-                return true;
-
-        cpsr = *vcpu_cpsr(vcpu);
-
-        /* Is condition field valid? */
-        if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
-                cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
-        else {
-                /* This can happen in Thumb mode: examine IT state. */
-                unsigned long it;
-
-                it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-                /* it == 0 => unconditional. */
-                if (it == 0)
-                        return true;
-
-                /* The cond for this insn works out as the top 4 bits. */
-                cond = (it >> 4);
-        }
-
-        /* Shift makes it look like an ARM-mode instruction */
-        insn = cond << 28;
-        return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
- * @vcpu:       The VCPU pointer
- *
- * When exceptions occur while instructions are executed in Thumb IF-THEN
- * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
- * to do this little bit of work manually. The fields map like this:
- *
- * IT[7:0] -> CPSR[26:25],CPSR[15:10]
- */
-static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
-{
-        unsigned long itbits, cond;
-        unsigned long cpsr = *vcpu_cpsr(vcpu);
-        bool is_arm = !(cpsr & PSR_T_BIT);
-
-        BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
-
-        if (!(cpsr & PSR_IT_MASK))
-                return;
-
-        cond = (cpsr & 0xe000) >> 13;
-        itbits = (cpsr & 0x1c00) >> (10 - 2);
-        itbits |= (cpsr & (0x3 << 25)) >> 25;
-
-        /* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
-        if ((itbits & 0x7) == 0)
-                itbits = cond = 0;
-        else
-                itbits = (itbits << 1) & 0x1f;
-
-        cpsr &= ~PSR_IT_MASK;
-        cpsr |= cond << 13;
-        cpsr |= (itbits & 0x1c) << (10 - 2);
-        cpsr |= (itbits & 0x3) << 25;
-        *vcpu_cpsr(vcpu) = cpsr;
-}
-
-/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
- * @vcpu: The vcpu pointer
- */
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
-        bool is_thumb;
-
-        is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
-        if (is_thumb && !is_wide_instr)
-                *vcpu_pc(vcpu) += 2;
-        else
-                *vcpu_pc(vcpu) += 4;
-        kvm_adjust_itstate(vcpu);
-}
-
-
 /******************************************************************************
  * Inject exceptions into the guest
  */
@@ -402,3 +303,15 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
         inject_abt(vcpu, true, addr);
 }
+
+/**
+ * kvm_inject_vabt - inject an async abort / SError into the guest
+ * @vcpu: The VCPU to receive the exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_vabt(struct kvm_vcpu *vcpu)
+{
+        vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VA);
+}
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 3f1ef0dbc899..4e40d1955e35 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -28,14 +28,6 @@
 
 typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
 
-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-        /* SVC called from Hyp mode should never get here */
-        kvm_debug("SVC called from Hyp mode shouldn't go here\n");
-        BUG();
-        return -EINVAL; /* Squash warning */
-}
-
 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
         int ret;
@@ -59,22 +51,6 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
         return 1;
 }
 
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-        /* The hypervisor should never cause aborts */
-        kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
-                kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
-        return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-        /* This is either an error in the ws. code or an external abort */
-        kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
-                kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
-        return -EFAULT;
-}
-
 /**
  * kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests
  * @vcpu:       the vcpu pointer
@@ -112,13 +88,10 @@ static exit_handle_fn arm_exit_handlers[] = {
         [HSR_EC_CP14_64]        = kvm_handle_cp14_access,
         [HSR_EC_CP_0_13]        = kvm_handle_cp_0_13_access,
         [HSR_EC_CP10_ID]        = kvm_handle_cp10_id,
-        [HSR_EC_SVC_HYP]        = handle_svc_hyp,
         [HSR_EC_HVC]            = handle_hvc,
         [HSR_EC_SMC]            = handle_smc,
         [HSR_EC_IABT]           = kvm_handle_guest_abort,
-        [HSR_EC_IABT_HYP]       = handle_pabt_hyp,
         [HSR_EC_DABT]           = kvm_handle_guest_abort,
-        [HSR_EC_DABT_HYP]       = handle_dabt_hyp,
 };
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
@@ -144,6 +117,25 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 {
         exit_handle_fn exit_handler;
 
+        if (ARM_ABORT_PENDING(exception_index)) {
+                u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+                /*
+                 * HVC/SMC already have an adjusted PC, which we need
+                 * to correct in order to return to after having
+                 * injected the abort.
+                 */
+                if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
+                        u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
+                        *vcpu_pc(vcpu) -= adj;
+                }
+
+                kvm_inject_vabt(vcpu);
+                return 1;
+        }
+
+        exception_index = ARM_EXCEPTION_CODE(exception_index);
+
         switch (exception_index) {
         case ARM_EXCEPTION_IRQ:
                 return 1;
@@ -160,6 +152,9 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 exit_handler = kvm_get_exit_handler(vcpu);
 
                 return exit_handler(vcpu, run);
+        case ARM_EXCEPTION_DATA_ABORT:
+                kvm_inject_vabt(vcpu);
+                return 1;
         default:
                 kvm_pr_unimpl("Unsupported exception type: %d",
                               exception_index);
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 8dfa5f7f9290..3023bb530edf 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -5,6 +5,7 @@
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
 
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S
index 21c238871c9e..60783f3b57cc 100644
--- a/arch/arm/kvm/hyp/entry.S
+++ b/arch/arm/kvm/hyp/entry.S
@@ -18,6 +18,7 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 
         .arch_extension     virt
 
@@ -63,6 +64,36 @@ ENTRY(__guest_exit)
         ldr     lr, [r0, #4]
 
         mov     r0, r1
+        mrs     r1, SPSR
+        mrs     r2, ELR_hyp
+        mrc     p15, 4, r3, c5, c2, 0   @ HSR
+
+        /*
+         * Force loads and stores to complete before unmasking aborts
+         * and forcing the delivery of the exception. This gives us a
+         * single instruction window, which the handler will try to
+         * match.
+         */
+        dsb     sy
+        cpsie   a
+
+        .global abort_guest_exit_start
+abort_guest_exit_start:
+
+        isb
+
+        .global abort_guest_exit_end
+abort_guest_exit_end:
+
+        /*
+         * If we took an abort, r0[31] will be set, and cmp will set
+         * the N bit in PSTATE.
+         */
+        cmp     r0, #0
+        msrmi   SPSR_cxsf, r1
+        msrmi   ELR_hyp, r2
+        mcrmi   p15, 4, r3, c5, c2, 0   @ HSR
+
         bx      lr
 ENDPROC(__guest_exit)
 
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S index 78091383a5d9..96beb53934c9 100644 --- a/arch/arm/kvm/hyp/hyp-entry.S +++ b/arch/arm/kvm/hyp/hyp-entry.S | |||
@@ -81,7 +81,6 @@ __kvm_hyp_vector: | |||
81 | invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED | 81 | invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED |
82 | invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE | 82 | invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE |
83 | invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT | 83 | invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT |
84 | invalid_vector hyp_dabt ARM_EXCEPTION_DATA_ABORT | ||
85 | invalid_vector hyp_fiq ARM_EXCEPTION_FIQ | 84 | invalid_vector hyp_fiq ARM_EXCEPTION_FIQ |
86 | 85 | ||
87 | ENTRY(__hyp_do_panic) | 86 | ENTRY(__hyp_do_panic) |
@@ -164,6 +163,21 @@ hyp_irq: | |||
164 | load_vcpu r0 @ Load VCPU pointer to r0 | 163 | load_vcpu r0 @ Load VCPU pointer to r0 |
165 | b __guest_exit | 164 | b __guest_exit |
166 | 165 | ||
166 | hyp_dabt: | ||
167 | push {r0, r1} | ||
168 | mrs r0, ELR_hyp | ||
169 | ldr r1, =abort_guest_exit_start | ||
170 | THUMB( add r1, r1, #1) | ||
171 | cmp r0, r1 | ||
172 | ldrne r1, =abort_guest_exit_end | ||
173 | THUMB( addne r1, r1, #1) | ||
174 | cmpne r0, r1 | ||
175 | pop {r0, r1} | ||
176 | bne __hyp_panic | ||
177 | |||
178 | orr r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT) | ||
179 | eret | ||
180 | |||
167 | .ltorg | 181 | .ltorg |
168 | 182 | ||
169 | .popsection | 183 | .popsection |
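Note: the new hyp_dabt handler above only tolerates an abort that landed inside the single-instruction window opened by __guest_exit; anything else is a HYP bug and panics. A hedged C equivalent of that check (in Thumb the labels carry bit 0 set, hence the THUMB(add #1) fixups in the assembly):

    extern char abort_guest_exit_start[], abort_guest_exit_end[];

    static bool abort_in_exit_window(unsigned long elr_hyp)
    {
            /* ELR_hyp must point at the isb (start) or just past it (end) */
            return elr_hyp == (unsigned long)abort_guest_exit_start ||
                   elr_hyp == (unsigned long)abort_guest_exit_end;
    }

If the match succeeds, the handler ORs ARM_EXIT_WITH_ABORT_BIT into the return code and resumes the interrupted exit path.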
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c index b13caa90cd44..92678b7bd046 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c | |||
@@ -14,6 +14,7 @@ | |||
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | #include <linux/jump_label.h> | ||
17 | 18 | ||
18 | #include <asm/kvm_asm.h> | 19 | #include <asm/kvm_asm.h> |
19 | #include <asm/kvm_hyp.h> | 20 | #include <asm/kvm_hyp.h> |
@@ -54,6 +55,15 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) | |||
54 | { | 55 | { |
55 | u32 val; | 56 | u32 val; |
56 | 57 | ||
58 | /* | ||
59 | * If we pended a virtual abort, preserve it until it gets | ||
60 | * cleared. See B1.9.9 (Virtual Abort exception) for details, | ||
61 | * but the crucial bit is the zeroing of HCR.VA in the | ||
62 | * pseudocode. | ||
63 | */ | ||
64 | if (vcpu->arch.hcr & HCR_VA) | ||
65 | vcpu->arch.hcr = read_sysreg(HCR); | ||
66 | |||
57 | write_sysreg(0, HCR); | 67 | write_sysreg(0, HCR); |
58 | write_sysreg(0, HSTR); | 68 | write_sysreg(0, HSTR); |
59 | val = read_sysreg(HDCR); | 69 | val = read_sysreg(HDCR); |
@@ -74,14 +84,21 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) | |||
74 | write_sysreg(read_sysreg(MIDR), VPIDR); | 84 | write_sysreg(read_sysreg(MIDR), VPIDR); |
75 | } | 85 | } |
76 | 86 | ||
87 | |||
77 | static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) | 88 | static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) |
78 | { | 89 | { |
79 | __vgic_v2_save_state(vcpu); | 90 | if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) |
91 | __vgic_v3_save_state(vcpu); | ||
92 | else | ||
93 | __vgic_v2_save_state(vcpu); | ||
80 | } | 94 | } |
81 | 95 | ||
82 | static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) | 96 | static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) |
83 | { | 97 | { |
84 | __vgic_v2_restore_state(vcpu); | 98 | if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) |
99 | __vgic_v3_restore_state(vcpu); | ||
100 | else | ||
101 | __vgic_v2_restore_state(vcpu); | ||
85 | } | 102 | } |
86 | 103 | ||
87 | static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) | 104 | static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) |
@@ -134,7 +151,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) | |||
134 | return true; | 151 | return true; |
135 | } | 152 | } |
136 | 153 | ||
137 | static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) | 154 | int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) |
138 | { | 155 | { |
139 | struct kvm_cpu_context *host_ctxt; | 156 | struct kvm_cpu_context *host_ctxt; |
140 | struct kvm_cpu_context *guest_ctxt; | 157 | struct kvm_cpu_context *guest_ctxt; |
@@ -191,8 +208,6 @@ again: | |||
191 | return exit_code; | 208 | return exit_code; |
192 | } | 209 | } |
193 | 210 | ||
194 | __alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu); | ||
195 | |||
196 | static const char * const __hyp_panic_string[] = { | 211 | static const char * const __hyp_panic_string[] = { |
197 | [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x", | 212 | [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x", |
198 | [ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x", | 213 | [ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x", |
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c index a2636001e616..729652854f90 100644 --- a/arch/arm/kvm/hyp/tlb.c +++ b/arch/arm/kvm/hyp/tlb.c | |||
@@ -34,7 +34,7 @@ | |||
34 | * As v7 does not support flushing per IPA, just nuke the whole TLB | 34 | * As v7 does not support flushing per IPA, just nuke the whole TLB |
35 | * instead, ignoring the ipa value. | 35 | * instead, ignoring the ipa value. |
36 | */ | 36 | */ |
37 | static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) | 37 | void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) |
38 | { | 38 | { |
39 | dsb(ishst); | 39 | dsb(ishst); |
40 | 40 | ||
@@ -50,21 +50,14 @@ static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) | |||
50 | write_sysreg(0, VTTBR); | 50 | write_sysreg(0, VTTBR); |
51 | } | 51 | } |
52 | 52 | ||
53 | __alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm); | 53 | void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
54 | |||
55 | static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) | ||
56 | { | 54 | { |
57 | __tlb_flush_vmid(kvm); | 55 | __kvm_tlb_flush_vmid(kvm); |
58 | } | 56 | } |
59 | 57 | ||
60 | __alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, | 58 | void __hyp_text __kvm_flush_vm_context(void) |
61 | phys_addr_t ipa); | ||
62 | |||
63 | static void __hyp_text __tlb_flush_vm_context(void) | ||
64 | { | 59 | { |
65 | write_sysreg(0, TLBIALLNSNHIS); | 60 | write_sysreg(0, TLBIALLNSNHIS); |
66 | write_sysreg(0, ICIALLUIS); | 61 | write_sysreg(0, ICIALLUIS); |
67 | dsb(ish); | 62 | dsb(ish); |
68 | } | 63 | } |
69 | |||
70 | __alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void); | ||
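Note: this file illustrates a pattern repeated across the series: the static hyp helper plus an __alias() declaration is collapsed into a single, directly named __hyp_text definition. A minimal sketch of the before/after shape, using the function from this hunk:

    /* old: static helper, exported via an alias */
    static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) { /* ... */ }
    __alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);

    /* new: the __kvm_* symbol is defined directly in the hyp section */
    void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) { /* ... */ }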
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 10f80a6c797a..b6e715fd3c90 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c | |||
@@ -126,12 +126,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) | |||
126 | int access_size; | 126 | int access_size; |
127 | bool sign_extend; | 127 | bool sign_extend; |
128 | 128 | ||
129 | if (kvm_vcpu_dabt_isextabt(vcpu)) { | ||
130 | /* cache operation on I/O addr, tell guest unsupported */ | ||
131 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); | ||
132 | return 1; | ||
133 | } | ||
134 | |||
135 | if (kvm_vcpu_dabt_iss1tw(vcpu)) { | 129 | if (kvm_vcpu_dabt_iss1tw(vcpu)) { |
136 | /* page table accesses IO mem: tell guest to fix its TTBR */ | 130 | /* page table accesses IO mem: tell guest to fix its TTBR */ |
137 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); | 131 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 29d0b23af2a9..60e0c1ac86e8 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -744,7 +744,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) | |||
744 | if (!pgd) | 744 | if (!pgd) |
745 | return -ENOMEM; | 745 | return -ENOMEM; |
746 | 746 | ||
747 | kvm_clean_pgd(pgd); | ||
748 | kvm->arch.pgd = pgd; | 747 | kvm->arch.pgd = pgd; |
749 | return 0; | 748 | return 0; |
750 | } | 749 | } |
@@ -936,7 +935,6 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |||
936 | if (!cache) | 935 | if (!cache) |
937 | return 0; /* ignore calls from kvm_set_spte_hva */ | 936 | return 0; /* ignore calls from kvm_set_spte_hva */ |
938 | pte = mmu_memory_cache_alloc(cache); | 937 | pte = mmu_memory_cache_alloc(cache); |
939 | kvm_clean_pte(pte); | ||
940 | pmd_populate_kernel(NULL, pmd, pte); | 938 | pmd_populate_kernel(NULL, pmd, pte); |
941 | get_page(virt_to_page(pmd)); | 939 | get_page(virt_to_page(pmd)); |
942 | } | 940 | } |
@@ -1434,6 +1432,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
1434 | int ret, idx; | 1432 | int ret, idx; |
1435 | 1433 | ||
1436 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); | 1434 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
1435 | if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) { | ||
1436 | kvm_inject_vabt(vcpu); | ||
1437 | return 1; | ||
1438 | } | ||
1439 | |||
1437 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); | 1440 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); |
1438 | 1441 | ||
1439 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), | 1442 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), |
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 8ec88e5b290f..ae7dbd79e257 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h | |||
@@ -79,6 +79,19 @@ | |||
79 | #include <linux/stringify.h> | 79 | #include <linux/stringify.h> |
80 | #include <asm/barrier.h> | 80 | #include <asm/barrier.h> |
81 | 81 | ||
82 | #define read_gicreg(r) \ | ||
83 | ({ \ | ||
84 | u64 reg; \ | ||
85 | asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \ | ||
86 | reg; \ | ||
87 | }) | ||
88 | |||
89 | #define write_gicreg(v,r) \ | ||
90 | do { \ | ||
91 | u64 __val = (v); \ | ||
92 | asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\ | ||
93 | } while (0) | ||
94 | |||
82 | /* | 95 | /* |
83 | * Low-level accessors | 96 | * Low-level accessors |
84 | * | 97 | * |
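Note: read_gicreg()/write_gicreg() move into the per-arch header so the relocated vgic-v3-sr.c (see the file move later in this diff) can use them from common code; mrs_s/msr_s spell out system-register encodings that older assemblers do not know by name. An illustrative use, with register names taken from the GICv3 save/restore code:

    u64 vtr;

    vtr = read_gicreg(ICH_VTR_EL2);   /* e.g. probe the number of LRs    */
    write_gicreg(0, ICH_HCR_EL2);     /* disable the virtual CPU i/f     */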
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 4b5c977af465..2a2752b5b6aa 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h | |||
@@ -50,7 +50,7 @@ | |||
50 | #define HCR_BSU (3 << 10) | 50 | #define HCR_BSU (3 << 10) |
51 | #define HCR_BSU_IS (UL(1) << 10) | 51 | #define HCR_BSU_IS (UL(1) << 10) |
52 | #define HCR_FB (UL(1) << 9) | 52 | #define HCR_FB (UL(1) << 9) |
53 | #define HCR_VA (UL(1) << 8) | 53 | #define HCR_VSE (UL(1) << 8) |
54 | #define HCR_VI (UL(1) << 7) | 54 | #define HCR_VI (UL(1) << 7) |
55 | #define HCR_VF (UL(1) << 6) | 55 | #define HCR_VF (UL(1) << 6) |
56 | #define HCR_AMO (UL(1) << 5) | 56 | #define HCR_AMO (UL(1) << 5) |
@@ -80,7 +80,7 @@ | |||
80 | #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ | 80 | #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ |
81 | HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ | 81 | HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ |
82 | HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW) | 82 | HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW) |
83 | #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) | 83 | #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) |
84 | #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) | 84 | #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) |
85 | #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) | 85 | #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) |
86 | 86 | ||
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 7561f63f1c28..18f746551bf6 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h | |||
@@ -20,10 +20,15 @@ | |||
20 | 20 | ||
21 | #include <asm/virt.h> | 21 | #include <asm/virt.h> |
22 | 22 | ||
23 | #define ARM_EXIT_WITH_SERROR_BIT 31 | ||
24 | #define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT)) | ||
25 | #define ARM_SERROR_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT)) | ||
26 | |||
23 | #define ARM_EXCEPTION_IRQ 0 | 27 | #define ARM_EXCEPTION_IRQ 0 |
24 | #define ARM_EXCEPTION_TRAP 1 | 28 | #define ARM_EXCEPTION_EL1_SERROR 1 |
29 | #define ARM_EXCEPTION_TRAP 2 | ||
25 | /* The hyp-stub will return this for any kvm_call_hyp() call */ | 30 | /* The hyp-stub will return this for any kvm_call_hyp() call */ |
26 | #define ARM_EXCEPTION_HYP_GONE 2 | 31 | #define ARM_EXCEPTION_HYP_GONE 3 |
27 | 32 | ||
28 | #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0 | 33 | #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0 |
29 | #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT) | 34 | #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT) |
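Note: bit 31 of the exit code now carries "an SError was pending on exit" orthogonally to the exception class, which is why the remaining codes are renumbered. A small usage sketch of the three macros (handle_serror() is a hypothetical stand-in, not a kernel function):

    u32 exit_code = ARM_EXCEPTION_TRAP | (1U << ARM_EXIT_WITH_SERROR_BIT);

    if (ARM_SERROR_PENDING(exit_code))          /* true here              */
            handle_serror();                    /* hypothetical helper    */

    exit_code = ARM_EXCEPTION_CODE(exit_code);  /* == ARM_EXCEPTION_TRAP  */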
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 4cdeae3b17c6..fd9d5fd788f5 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
@@ -38,6 +38,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); | |||
38 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); | 38 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); |
39 | 39 | ||
40 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); | 40 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); |
41 | void kvm_inject_vabt(struct kvm_vcpu *vcpu); | ||
41 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); | 42 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); |
42 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); | 43 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); |
43 | 44 | ||
@@ -147,6 +148,16 @@ static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) | |||
147 | return vcpu->arch.fault.esr_el2; | 148 | return vcpu->arch.fault.esr_el2; |
148 | } | 149 | } |
149 | 150 | ||
151 | static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) | ||
152 | { | ||
153 | u32 esr = kvm_vcpu_get_hsr(vcpu); | ||
154 | |||
155 | if (esr & ESR_ELx_CV) | ||
156 | return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT; | ||
157 | |||
158 | return -1; | ||
159 | } | ||
160 | |||
150 | static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) | 161 | static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) |
151 | { | 162 | { |
152 | return vcpu->arch.fault.far_el2; | 163 | return vcpu->arch.fault.far_el2; |
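Note: kvm_vcpu_get_condition() is hoisted into the header so hyp code (the relocated aarch32.c below) can share it; a return of -1 means ESR_ELx.CV was clear and no condition is encoded in the syndrome. A hedged caller sketch:

    static bool insn_is_unconditional(const struct kvm_vcpu *vcpu)
    {
            int cond = kvm_vcpu_get_condition(vcpu);

            /* 0xe is the AArch32 "always" condition; on -1 the caller
             * must fall back to PSTATE.IT, as kvm_condition_valid32()
             * in aarch32.c does. */
            return cond == 0xe;
    }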
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index cff510574fae..b18e852d27e8 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h | |||
@@ -123,6 +123,7 @@ typeof(orig) * __hyp_text fname(void) \ | |||
123 | 123 | ||
124 | void __vgic_v2_save_state(struct kvm_vcpu *vcpu); | 124 | void __vgic_v2_save_state(struct kvm_vcpu *vcpu); |
125 | void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); | 125 | void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); |
126 | int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu); | ||
126 | 127 | ||
127 | void __vgic_v3_save_state(struct kvm_vcpu *vcpu); | 128 | void __vgic_v3_save_state(struct kvm_vcpu *vcpu); |
128 | void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); | 129 | void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); |
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index b6bb83400cd8..8f99ab625fc4 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
@@ -166,12 +166,6 @@ void kvm_clear_hyp_idmap(void); | |||
166 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) | 166 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) |
167 | #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) | 167 | #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) |
168 | 168 | ||
169 | static inline void kvm_clean_pgd(pgd_t *pgd) {} | ||
170 | static inline void kvm_clean_pmd(pmd_t *pmd) {} | ||
171 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} | ||
172 | static inline void kvm_clean_pte(pte_t *pte) {} | ||
173 | static inline void kvm_clean_pte_entry(pte_t *pte) {} | ||
174 | |||
175 | static inline pte_t kvm_s2pte_mkwrite(pte_t pte) | 169 | static inline pte_t kvm_s2pte_mkwrite(pte_t pte) |
176 | { | 170 | { |
177 | pte_val(pte) |= PTE_S2_RDWR; | 171 | pte_val(pte) |= PTE_S2_RDWR; |
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 9c9edc98d271..6eaf12c1d627 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig | |||
@@ -16,7 +16,7 @@ menuconfig VIRTUALIZATION | |||
16 | 16 | ||
17 | if VIRTUALIZATION | 17 | if VIRTUALIZATION |
18 | 18 | ||
19 | config KVM_ARM_VGIC_V3 | 19 | config KVM_ARM_VGIC_V3_ITS |
20 | bool | 20 | bool |
21 | 21 | ||
22 | config KVM | 22 | config KVM |
@@ -34,7 +34,7 @@ config KVM | |||
34 | select KVM_VFIO | 34 | select KVM_VFIO |
35 | select HAVE_KVM_EVENTFD | 35 | select HAVE_KVM_EVENTFD |
36 | select HAVE_KVM_IRQFD | 36 | select HAVE_KVM_IRQFD |
37 | select KVM_ARM_VGIC_V3 | 37 | select KVM_ARM_VGIC_V3_ITS |
38 | select KVM_ARM_PMU if HW_PERF_EVENTS | 38 | select KVM_ARM_PMU if HW_PERF_EVENTS |
39 | select HAVE_KVM_MSI | 39 | select HAVE_KVM_MSI |
40 | select HAVE_KVM_IRQCHIP | 40 | select HAVE_KVM_IRQCHIP |
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 695eb3c7ef41..d50a82a16ff6 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile | |||
@@ -16,9 +16,10 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/e | |||
16 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o | 16 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o |
17 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o | 17 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o |
18 | 18 | ||
19 | kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o | 19 | kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o |
20 | kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o | 20 | kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o |
21 | kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o | 21 | kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o |
22 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o | ||
22 | 23 | ||
23 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o | 24 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o |
24 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o | 25 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o |
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index fa96fe2bd469..a204adf29f0a 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -170,9 +170,32 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
170 | { | 170 | { |
171 | exit_handle_fn exit_handler; | 171 | exit_handle_fn exit_handler; |
172 | 172 | ||
173 | if (ARM_SERROR_PENDING(exception_index)) { | ||
174 | u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu)); | ||
175 | |||
176 | /* | ||
177 | * HVC/SMC already have an adjusted PC, which we need | ||
178 | * to correct in order to return to after having | ||
179 | * injected the SError. | ||
180 | */ | ||
181 | if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 || | ||
182 | hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) { | ||
183 | u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2; | ||
184 | *vcpu_pc(vcpu) -= adj; | ||
185 | } | ||
186 | |||
187 | kvm_inject_vabt(vcpu); | ||
188 | return 1; | ||
189 | } | ||
190 | |||
191 | exception_index = ARM_EXCEPTION_CODE(exception_index); | ||
192 | |||
173 | switch (exception_index) { | 193 | switch (exception_index) { |
174 | case ARM_EXCEPTION_IRQ: | 194 | case ARM_EXCEPTION_IRQ: |
175 | return 1; | 195 | return 1; |
196 | case ARM_EXCEPTION_EL1_SERROR: | ||
197 | kvm_inject_vabt(vcpu); | ||
198 | return 1; | ||
176 | case ARM_EXCEPTION_TRAP: | 199 | case ARM_EXCEPTION_TRAP: |
177 | /* | 200 | /* |
178 | * See ARM ARM B1.14.1: "Hyp traps on instructions | 201 | * See ARM ARM B1.14.1: "Hyp traps on instructions |
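Note: one subtlety in the SError path above: HVC/SMC trap with the PC already advanced past the instruction, so it is wound back before injecting the SError and the guest re-issues the call afterwards. A hedged worked example:

    /* A 32-bit guest runs a 16-bit Thumb HVC: trap_il_is32bit() is
     * false, so adj = 2 and the PC lands back on the HVC itself. An
     * A64 or A32 HVC would use adj = 4. */
    u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;

    *vcpu_pc(vcpu) -= adj;   /* replay the hypercall after the SError */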
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 0c85febcc1eb..aaf42ae8d8c3 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile | |||
@@ -5,9 +5,9 @@ | |||
5 | KVM=../../../../virt/kvm | 5 | KVM=../../../../virt/kvm |
6 | 6 | ||
7 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o | 7 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o |
8 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o | ||
8 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o | 9 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o |
9 | 10 | ||
10 | obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o | ||
11 | obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o | 11 | obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o |
12 | obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o | 12 | obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o |
13 | obj-$(CONFIG_KVM_ARM_HOST) += entry.o | 13 | obj-$(CONFIG_KVM_ARM_HOST) += entry.o |
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index 33342a776ec7..4ba5c9095d03 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c | |||
@@ -131,9 +131,7 @@ void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu) | |||
131 | vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY; | 131 | vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY; |
132 | } | 132 | } |
133 | 133 | ||
134 | static u32 __hyp_text __debug_read_mdcr_el2(void) | 134 | u32 __hyp_text __kvm_get_mdcr_el2(void) |
135 | { | 135 | { |
136 | return read_sysreg(mdcr_el2); | 136 | return read_sysreg(mdcr_el2); |
137 | } | 137 | } |
138 | |||
139 | __alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void); | ||
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index ce9e5e5f28cf..12ee62d6d410 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S | |||
@@ -55,79 +55,111 @@ | |||
55 | */ | 55 | */ |
56 | ENTRY(__guest_enter) | 56 | ENTRY(__guest_enter) |
57 | // x0: vcpu | 57 | // x0: vcpu |
58 | // x1: host/guest context | 58 | // x1: host context |
59 | // x2-x18: clobbered by macros | 59 | // x2-x17: clobbered by macros |
60 | // x18: guest context | ||
60 | 61 | ||
61 | // Store the host regs | 62 | // Store the host regs |
62 | save_callee_saved_regs x1 | 63 | save_callee_saved_regs x1 |
63 | 64 | ||
64 | // Preserve vcpu & host_ctxt for use at exit time | 65 | // Store the host_ctxt for use at exit time |
65 | stp x0, x1, [sp, #-16]! | 66 | str x1, [sp, #-16]! |
66 | 67 | ||
67 | add x1, x0, #VCPU_CONTEXT | 68 | add x18, x0, #VCPU_CONTEXT |
68 | 69 | ||
69 | // Prepare x0-x1 for later restore by pushing them onto the stack | 70 | // Restore guest regs x0-x17 |
70 | ldp x2, x3, [x1, #CPU_XREG_OFFSET(0)] | 71 | ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)] |
71 | stp x2, x3, [sp, #-16]! | 72 | ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)] |
73 | ldp x4, x5, [x18, #CPU_XREG_OFFSET(4)] | ||
74 | ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)] | ||
75 | ldp x8, x9, [x18, #CPU_XREG_OFFSET(8)] | ||
76 | ldp x10, x11, [x18, #CPU_XREG_OFFSET(10)] | ||
77 | ldp x12, x13, [x18, #CPU_XREG_OFFSET(12)] | ||
78 | ldp x14, x15, [x18, #CPU_XREG_OFFSET(14)] | ||
79 | ldp x16, x17, [x18, #CPU_XREG_OFFSET(16)] | ||
72 | 80 | ||
73 | // x2-x18 | 81 | // Restore guest regs x19-x29, lr |
74 | ldp x2, x3, [x1, #CPU_XREG_OFFSET(2)] | 82 | restore_callee_saved_regs x18 |
75 | ldp x4, x5, [x1, #CPU_XREG_OFFSET(4)] | 83 | |
76 | ldp x6, x7, [x1, #CPU_XREG_OFFSET(6)] | 84 | // Restore guest reg x18 |
77 | ldp x8, x9, [x1, #CPU_XREG_OFFSET(8)] | 85 | ldr x18, [x18, #CPU_XREG_OFFSET(18)] |
78 | ldp x10, x11, [x1, #CPU_XREG_OFFSET(10)] | ||
79 | ldp x12, x13, [x1, #CPU_XREG_OFFSET(12)] | ||
80 | ldp x14, x15, [x1, #CPU_XREG_OFFSET(14)] | ||
81 | ldp x16, x17, [x1, #CPU_XREG_OFFSET(16)] | ||
82 | ldr x18, [x1, #CPU_XREG_OFFSET(18)] | ||
83 | |||
84 | // x19-x29, lr | ||
85 | restore_callee_saved_regs x1 | ||
86 | |||
87 | // Last bits of the 64bit state | ||
88 | ldp x0, x1, [sp], #16 | ||
89 | 86 | ||
90 | // Do not touch any register after this! | 87 | // Do not touch any register after this! |
91 | eret | 88 | eret |
92 | ENDPROC(__guest_enter) | 89 | ENDPROC(__guest_enter) |
93 | 90 | ||
94 | ENTRY(__guest_exit) | 91 | ENTRY(__guest_exit) |
95 | // x0: vcpu | 92 | // x0: return code |
96 | // x1: return code | 93 | // x1: vcpu |
97 | // x2-x3: free | 94 | // x2-x29,lr: vcpu regs |
98 | // x4-x29,lr: vcpu regs | 95 | // vcpu x0-x1 on the stack |
99 | // vcpu x0-x3 on the stack | ||
100 | 96 | ||
101 | add x2, x0, #VCPU_CONTEXT | 97 | add x1, x1, #VCPU_CONTEXT |
102 | 98 | ||
103 | stp x4, x5, [x2, #CPU_XREG_OFFSET(4)] | 99 | ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN) |
104 | stp x6, x7, [x2, #CPU_XREG_OFFSET(6)] | ||
105 | stp x8, x9, [x2, #CPU_XREG_OFFSET(8)] | ||
106 | stp x10, x11, [x2, #CPU_XREG_OFFSET(10)] | ||
107 | stp x12, x13, [x2, #CPU_XREG_OFFSET(12)] | ||
108 | stp x14, x15, [x2, #CPU_XREG_OFFSET(14)] | ||
109 | stp x16, x17, [x2, #CPU_XREG_OFFSET(16)] | ||
110 | str x18, [x2, #CPU_XREG_OFFSET(18)] | ||
111 | 100 | ||
112 | ldp x6, x7, [sp], #16 // x2, x3 | 101 | // Store the guest regs x2 and x3 |
113 | ldp x4, x5, [sp], #16 // x0, x1 | 102 | stp x2, x3, [x1, #CPU_XREG_OFFSET(2)] |
114 | 103 | ||
115 | stp x4, x5, [x2, #CPU_XREG_OFFSET(0)] | 104 | // Retrieve the guest regs x0-x1 from the stack |
116 | stp x6, x7, [x2, #CPU_XREG_OFFSET(2)] | 105 | ldp x2, x3, [sp], #16 // x0, x1 |
106 | |||
107 | // Store the guest regs x0-x1 and x4-x18 | ||
108 | stp x2, x3, [x1, #CPU_XREG_OFFSET(0)] | ||
109 | stp x4, x5, [x1, #CPU_XREG_OFFSET(4)] | ||
110 | stp x6, x7, [x1, #CPU_XREG_OFFSET(6)] | ||
111 | stp x8, x9, [x1, #CPU_XREG_OFFSET(8)] | ||
112 | stp x10, x11, [x1, #CPU_XREG_OFFSET(10)] | ||
113 | stp x12, x13, [x1, #CPU_XREG_OFFSET(12)] | ||
114 | stp x14, x15, [x1, #CPU_XREG_OFFSET(14)] | ||
115 | stp x16, x17, [x1, #CPU_XREG_OFFSET(16)] | ||
116 | str x18, [x1, #CPU_XREG_OFFSET(18)] | ||
117 | |||
118 | // Store the guest regs x19-x29, lr | ||
119 | save_callee_saved_regs x1 | ||
117 | 120 | ||
118 | save_callee_saved_regs x2 | 121 | // Restore the host_ctxt from the stack |
122 | ldr x2, [sp], #16 | ||
119 | 123 | ||
120 | // Restore vcpu & host_ctxt from the stack | ||
121 | // (preserving return code in x1) | ||
122 | ldp x0, x2, [sp], #16 | ||
123 | // Now restore the host regs | 124 | // Now restore the host regs |
124 | restore_callee_saved_regs x2 | 125 | restore_callee_saved_regs x2 |
125 | 126 | ||
126 | mov x0, x1 | 127 | // If we have a pending asynchronous abort, now is the |
127 | ret | 128 | // time to find out. From your VAXorcist book, page 666: |
129 | // "Threaten me not, oh Evil one! For I speak with | ||
130 | // the power of DEC, and I command thee to show thyself!" | ||
131 | mrs x2, elr_el2 | ||
132 | mrs x3, esr_el2 | ||
133 | mrs x4, spsr_el2 | ||
134 | mov x5, x0 | ||
135 | |||
136 | dsb sy // Synchronize against in-flight ld/st | ||
137 | msr daifclr, #4 // Unmask aborts | ||
138 | |||
139 | // This is our single instruction exception window. A pending | ||
140 | // SError is guaranteed to occur at the earliest when we unmask | ||
141 | // it, and at the latest just after the ISB. | ||
142 | .global abort_guest_exit_start | ||
143 | abort_guest_exit_start: | ||
144 | |||
145 | isb | ||
146 | |||
147 | .global abort_guest_exit_end | ||
148 | abort_guest_exit_end: | ||
149 | |||
150 | // If the exception took place, restore the EL1 exception | ||
151 | // context so that we can report some information. | ||
152 | // Merge the exception code with the SError pending bit. | ||
153 | tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f | ||
154 | msr elr_el2, x2 | ||
155 | msr esr_el2, x3 | ||
156 | msr spsr_el2, x4 | ||
157 | orr x0, x0, x5 | ||
158 | 1: ret | ||
128 | ENDPROC(__guest_exit) | 159 | ENDPROC(__guest_exit) |
129 | 160 | ||
130 | ENTRY(__fpsimd_guest_restore) | 161 | ENTRY(__fpsimd_guest_restore) |
162 | stp x2, x3, [sp, #-16]! | ||
131 | stp x4, lr, [sp, #-16]! | 163 | stp x4, lr, [sp, #-16]! |
132 | 164 | ||
133 | alternative_if_not ARM64_HAS_VIRT_HOST_EXTN | 165 | alternative_if_not ARM64_HAS_VIRT_HOST_EXTN |
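Note: after the abort window in __guest_exit, a firing el2_error returns only the SError bit in x0 while x5 still holds the original exit code; the saved EL1 exception context is put back (the SError clobbered ELR/ESR/SPSR_EL2) and the two values are merged. A C-flavoured sketch, where saved_elr/saved_esr/saved_spsr/original_code are assumed locals mirroring x2-x5:

    if (exit_code & (1U << ARM_EXIT_WITH_SERROR_BIT)) {
            write_sysreg(saved_elr,  elr_el2);   /* undo SError clobber */
            write_sysreg(saved_esr,  esr_el2);
            write_sysreg(saved_spsr, spsr_el2);
            exit_code |= original_code;          /* code + SError bit   */
    }
    return exit_code;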
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index f6d9694ae3b1..4e92399f7105 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S | |||
@@ -27,16 +27,6 @@ | |||
27 | .text | 27 | .text |
28 | .pushsection .hyp.text, "ax" | 28 | .pushsection .hyp.text, "ax" |
29 | 29 | ||
30 | .macro save_x0_to_x3 | ||
31 | stp x0, x1, [sp, #-16]! | ||
32 | stp x2, x3, [sp, #-16]! | ||
33 | .endm | ||
34 | |||
35 | .macro restore_x0_to_x3 | ||
36 | ldp x2, x3, [sp], #16 | ||
37 | ldp x0, x1, [sp], #16 | ||
38 | .endm | ||
39 | |||
40 | .macro do_el2_call | 30 | .macro do_el2_call |
41 | /* | 31 | /* |
42 | * Shuffle the parameters before calling the function | 32 | * Shuffle the parameters before calling the function |
@@ -79,23 +69,23 @@ ENTRY(__kvm_hyp_teardown) | |||
79 | ENDPROC(__kvm_hyp_teardown) | 69 | ENDPROC(__kvm_hyp_teardown) |
80 | 70 | ||
81 | el1_sync: // Guest trapped into EL2 | 71 | el1_sync: // Guest trapped into EL2 |
82 | save_x0_to_x3 | 72 | stp x0, x1, [sp, #-16]! |
83 | 73 | ||
84 | alternative_if_not ARM64_HAS_VIRT_HOST_EXTN | 74 | alternative_if_not ARM64_HAS_VIRT_HOST_EXTN |
85 | mrs x1, esr_el2 | 75 | mrs x1, esr_el2 |
86 | alternative_else | 76 | alternative_else |
87 | mrs x1, esr_el1 | 77 | mrs x1, esr_el1 |
88 | alternative_endif | 78 | alternative_endif |
89 | lsr x2, x1, #ESR_ELx_EC_SHIFT | 79 | lsr x0, x1, #ESR_ELx_EC_SHIFT |
90 | 80 | ||
91 | cmp x2, #ESR_ELx_EC_HVC64 | 81 | cmp x0, #ESR_ELx_EC_HVC64 |
92 | b.ne el1_trap | 82 | b.ne el1_trap |
93 | 83 | ||
94 | mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest | 84 | mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest |
95 | cbnz x3, el1_trap // called HVC | 85 | cbnz x1, el1_trap // called HVC |
96 | 86 | ||
97 | /* Here, we're pretty sure the host called HVC. */ | 87 | /* Here, we're pretty sure the host called HVC. */ |
98 | restore_x0_to_x3 | 88 | ldp x0, x1, [sp], #16 |
99 | 89 | ||
100 | cmp x0, #HVC_GET_VECTORS | 90 | cmp x0, #HVC_GET_VECTORS |
101 | b.ne 1f | 91 | b.ne 1f |
@@ -113,24 +103,51 @@ alternative_endif | |||
113 | 103 | ||
114 | el1_trap: | 104 | el1_trap: |
115 | /* | 105 | /* |
116 | * x1: ESR | 106 | * x0: ESR_EC |
117 | * x2: ESR_EC | ||
118 | */ | 107 | */ |
119 | 108 | ||
120 | /* Guest accessed VFP/SIMD registers, save host, restore Guest */ | 109 | /* Guest accessed VFP/SIMD registers, save host, restore Guest */ |
121 | cmp x2, #ESR_ELx_EC_FP_ASIMD | 110 | cmp x0, #ESR_ELx_EC_FP_ASIMD |
122 | b.eq __fpsimd_guest_restore | 111 | b.eq __fpsimd_guest_restore |
123 | 112 | ||
124 | mrs x0, tpidr_el2 | 113 | mrs x1, tpidr_el2 |
125 | mov x1, #ARM_EXCEPTION_TRAP | 114 | mov x0, #ARM_EXCEPTION_TRAP |
126 | b __guest_exit | 115 | b __guest_exit |
127 | 116 | ||
128 | el1_irq: | 117 | el1_irq: |
129 | save_x0_to_x3 | 118 | stp x0, x1, [sp, #-16]! |
130 | mrs x0, tpidr_el2 | 119 | mrs x1, tpidr_el2 |
131 | mov x1, #ARM_EXCEPTION_IRQ | 120 | mov x0, #ARM_EXCEPTION_IRQ |
121 | b __guest_exit | ||
122 | |||
123 | el1_error: | ||
124 | stp x0, x1, [sp, #-16]! | ||
125 | mrs x1, tpidr_el2 | ||
126 | mov x0, #ARM_EXCEPTION_EL1_SERROR | ||
132 | b __guest_exit | 127 | b __guest_exit |
133 | 128 | ||
129 | el2_error: | ||
130 | /* | ||
131 | * Only two possibilities: | ||
132 | * 1) Either we come from the exit path, having just unmasked | ||
133 | * PSTATE.A: change the return code to an EL2 fault, and | ||
134 | * carry on, as we're already in a sane state to handle it. | ||
135 | * 2) Or we come from anywhere else, and that's a bug: we panic. | ||
136 | * | ||
137 | * For (1), x0 contains the original return code and x1 doesn't | ||
138 | * contain anything meaningful at that stage. We can reuse them | ||
139 | * as temp registers. | ||
140 | * For (2), who cares? | ||
141 | */ | ||
142 | mrs x0, elr_el2 | ||
143 | adr x1, abort_guest_exit_start | ||
144 | cmp x0, x1 | ||
145 | adr x1, abort_guest_exit_end | ||
146 | ccmp x0, x1, #4, ne | ||
147 | b.ne __hyp_panic | ||
148 | mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) | ||
149 | eret | ||
150 | |||
134 | ENTRY(__hyp_do_panic) | 151 | ENTRY(__hyp_do_panic) |
135 | mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ | 152 | mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ |
136 | PSR_MODE_EL1h) | 153 | PSR_MODE_EL1h) |
@@ -155,11 +172,9 @@ ENDPROC(\label) | |||
155 | invalid_vector el2h_sync_invalid | 172 | invalid_vector el2h_sync_invalid |
156 | invalid_vector el2h_irq_invalid | 173 | invalid_vector el2h_irq_invalid |
157 | invalid_vector el2h_fiq_invalid | 174 | invalid_vector el2h_fiq_invalid |
158 | invalid_vector el2h_error_invalid | ||
159 | invalid_vector el1_sync_invalid | 175 | invalid_vector el1_sync_invalid |
160 | invalid_vector el1_irq_invalid | 176 | invalid_vector el1_irq_invalid |
161 | invalid_vector el1_fiq_invalid | 177 | invalid_vector el1_fiq_invalid |
162 | invalid_vector el1_error_invalid | ||
163 | 178 | ||
164 | .ltorg | 179 | .ltorg |
165 | 180 | ||
@@ -174,15 +189,15 @@ ENTRY(__kvm_hyp_vector) | |||
174 | ventry el2h_sync_invalid // Synchronous EL2h | 189 | ventry el2h_sync_invalid // Synchronous EL2h |
175 | ventry el2h_irq_invalid // IRQ EL2h | 190 | ventry el2h_irq_invalid // IRQ EL2h |
176 | ventry el2h_fiq_invalid // FIQ EL2h | 191 | ventry el2h_fiq_invalid // FIQ EL2h |
177 | ventry el2h_error_invalid // Error EL2h | 192 | ventry el2_error // Error EL2h |
178 | 193 | ||
179 | ventry el1_sync // Synchronous 64-bit EL1 | 194 | ventry el1_sync // Synchronous 64-bit EL1 |
180 | ventry el1_irq // IRQ 64-bit EL1 | 195 | ventry el1_irq // IRQ 64-bit EL1 |
181 | ventry el1_fiq_invalid // FIQ 64-bit EL1 | 196 | ventry el1_fiq_invalid // FIQ 64-bit EL1 |
182 | ventry el1_error_invalid // Error 64-bit EL1 | 197 | ventry el1_error // Error 64-bit EL1 |
183 | 198 | ||
184 | ventry el1_sync // Synchronous 32-bit EL1 | 199 | ventry el1_sync // Synchronous 32-bit EL1 |
185 | ventry el1_irq // IRQ 32-bit EL1 | 200 | ventry el1_irq // IRQ 32-bit EL1 |
186 | ventry el1_fiq_invalid // FIQ 32-bit EL1 | 201 | ventry el1_fiq_invalid // FIQ 32-bit EL1 |
187 | ventry el1_error_invalid // Error 32-bit EL1 | 202 | ventry el1_error // Error 32-bit EL1 |
188 | ENDPROC(__kvm_hyp_vector) | 203 | ENDPROC(__kvm_hyp_vector) |
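Note: the ccmp idiom in el2_error above is worth unpacking: cmp sets flags from ELR vs abort_guest_exit_start, then ccmp re-compares against abort_guest_exit_end only if they differed (the #4 immediate forces Z=1, i.e. "equal", otherwise), so b.ne panics unless ELR matches one of the two labels. A hedged C fragment of the same logic:

    u64 elr = read_sysreg(elr_el2);

    if (elr != (u64)abort_guest_exit_start &&
        elr != (u64)abort_guest_exit_end)
            __hyp_panic();                      /* SError outside window */

    return 1U << ARM_EXIT_WITH_SERROR_BIT;      /* merged with the real
                                                   code in __guest_exit  */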
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 5a84b4562603..83037cd62d01 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -16,7 +16,10 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/jump_label.h> | ||
20 | |||
19 | #include <asm/kvm_asm.h> | 21 | #include <asm/kvm_asm.h> |
22 | #include <asm/kvm_emulate.h> | ||
20 | #include <asm/kvm_hyp.h> | 23 | #include <asm/kvm_hyp.h> |
21 | 24 | ||
22 | static bool __hyp_text __fpsimd_enabled_nvhe(void) | 25 | static bool __hyp_text __fpsimd_enabled_nvhe(void) |
@@ -109,6 +112,15 @@ static hyp_alternate_select(__deactivate_traps_arch, | |||
109 | 112 | ||
110 | static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) | 113 | static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) |
111 | { | 114 | { |
115 | /* | ||
116 | * If we pended a virtual abort, preserve it until it gets | ||
117 | * cleared. See D1.14.3 (Virtual Interrupts) for details, but | ||
118 | * the crucial bit is "On taking a vSError interrupt, | ||
119 | * HCR_EL2.VSE is cleared to 0." | ||
120 | */ | ||
121 | if (vcpu->arch.hcr_el2 & HCR_VSE) | ||
122 | vcpu->arch.hcr_el2 = read_sysreg(hcr_el2); | ||
123 | |||
112 | __deactivate_traps_arch()(); | 124 | __deactivate_traps_arch()(); |
113 | write_sysreg(0, hstr_el2); | 125 | write_sysreg(0, hstr_el2); |
114 | write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); | 126 | write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); |
@@ -126,17 +138,13 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) | |||
126 | write_sysreg(0, vttbr_el2); | 138 | write_sysreg(0, vttbr_el2); |
127 | } | 139 | } |
128 | 140 | ||
129 | static hyp_alternate_select(__vgic_call_save_state, | ||
130 | __vgic_v2_save_state, __vgic_v3_save_state, | ||
131 | ARM64_HAS_SYSREG_GIC_CPUIF); | ||
132 | |||
133 | static hyp_alternate_select(__vgic_call_restore_state, | ||
134 | __vgic_v2_restore_state, __vgic_v3_restore_state, | ||
135 | ARM64_HAS_SYSREG_GIC_CPUIF); | ||
136 | |||
137 | static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) | 141 | static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) |
138 | { | 142 | { |
139 | __vgic_call_save_state()(vcpu); | 143 | if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) |
144 | __vgic_v3_save_state(vcpu); | ||
145 | else | ||
146 | __vgic_v2_save_state(vcpu); | ||
147 | |||
140 | write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2); | 148 | write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2); |
141 | } | 149 | } |
142 | 150 | ||
@@ -149,7 +157,10 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) | |||
149 | val |= vcpu->arch.irq_lines; | 157 | val |= vcpu->arch.irq_lines; |
150 | write_sysreg(val, hcr_el2); | 158 | write_sysreg(val, hcr_el2); |
151 | 159 | ||
152 | __vgic_call_restore_state()(vcpu); | 160 | if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) |
161 | __vgic_v3_restore_state(vcpu); | ||
162 | else | ||
163 | __vgic_v2_restore_state(vcpu); | ||
153 | } | 164 | } |
154 | 165 | ||
155 | static bool __hyp_text __true_value(void) | 166 | static bool __hyp_text __true_value(void) |
@@ -232,7 +243,22 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) | |||
232 | return true; | 243 | return true; |
233 | } | 244 | } |
234 | 245 | ||
235 | static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) | 246 | static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu) |
247 | { | ||
248 | *vcpu_pc(vcpu) = read_sysreg_el2(elr); | ||
249 | |||
250 | if (vcpu_mode_is_32bit(vcpu)) { | ||
251 | vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr); | ||
252 | kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
253 | write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr); | ||
254 | } else { | ||
255 | *vcpu_pc(vcpu) += 4; | ||
256 | } | ||
257 | |||
258 | write_sysreg_el2(*vcpu_pc(vcpu), elr); | ||
259 | } | ||
260 | |||
261 | int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) | ||
236 | { | 262 | { |
237 | struct kvm_cpu_context *host_ctxt; | 263 | struct kvm_cpu_context *host_ctxt; |
238 | struct kvm_cpu_context *guest_ctxt; | 264 | struct kvm_cpu_context *guest_ctxt; |
@@ -267,9 +293,43 @@ again: | |||
267 | exit_code = __guest_enter(vcpu, host_ctxt); | 293 | exit_code = __guest_enter(vcpu, host_ctxt); |
268 | /* And we're baaack! */ | 294 | /* And we're baaack! */ |
269 | 295 | ||
296 | /* | ||
297 | * We're using the raw exception code in order to only process | ||
298 | * the trap if no SError is pending. We will come back to the | ||
299 | * same PC once the SError has been injected, and replay the | ||
300 | * trapping instruction. | ||
301 | */ | ||
270 | if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) | 302 | if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) |
271 | goto again; | 303 | goto again; |
272 | 304 | ||
305 | if (static_branch_unlikely(&vgic_v2_cpuif_trap) && | ||
306 | exit_code == ARM_EXCEPTION_TRAP) { | ||
307 | bool valid; | ||
308 | |||
309 | valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW && | ||
310 | kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && | ||
311 | kvm_vcpu_dabt_isvalid(vcpu) && | ||
312 | !kvm_vcpu_dabt_isextabt(vcpu) && | ||
313 | !kvm_vcpu_dabt_iss1tw(vcpu); | ||
314 | |||
315 | if (valid) { | ||
316 | int ret = __vgic_v2_perform_cpuif_access(vcpu); | ||
317 | |||
318 | if (ret == 1) { | ||
319 | __skip_instr(vcpu); | ||
320 | goto again; | ||
321 | } | ||
322 | |||
323 | if (ret == -1) { | ||
324 | /* Promote an illegal access to an SError */ | ||
325 | __skip_instr(vcpu); | ||
326 | exit_code = ARM_EXCEPTION_EL1_SERROR; | ||
327 | } | ||
328 | |||
329 | /* 0 falls through to be handled out of EL2 */ | ||
330 | } | ||
331 | } | ||
332 | |||
273 | fp_enabled = __fpsimd_enabled(); | 333 | fp_enabled = __fpsimd_enabled(); |
274 | 334 | ||
275 | __sysreg_save_guest_state(guest_ctxt); | 335 | __sysreg_save_guest_state(guest_ctxt); |
@@ -293,8 +353,6 @@ again: | |||
293 | return exit_code; | 353 | return exit_code; |
294 | } | 354 | } |
295 | 355 | ||
296 | __alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu); | ||
297 | |||
298 | static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; | 356 | static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; |
299 | 357 | ||
300 | static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par) | 358 | static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par) |
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index be8177cdd3bf..9cc0ea784ae6 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | #include <asm/kvm_hyp.h> | 18 | #include <asm/kvm_hyp.h> |
19 | 19 | ||
20 | static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) | 20 | void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
21 | { | 21 | { |
22 | dsb(ishst); | 22 | dsb(ishst); |
23 | 23 | ||
@@ -48,10 +48,7 @@ static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) | |||
48 | write_sysreg(0, vttbr_el2); | 48 | write_sysreg(0, vttbr_el2); |
49 | } | 49 | } |
50 | 50 | ||
51 | __alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, | 51 | void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) |
52 | phys_addr_t ipa); | ||
53 | |||
54 | static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) | ||
55 | { | 52 | { |
56 | dsb(ishst); | 53 | dsb(ishst); |
57 | 54 | ||
@@ -67,14 +64,10 @@ static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) | |||
67 | write_sysreg(0, vttbr_el2); | 64 | write_sysreg(0, vttbr_el2); |
68 | } | 65 | } |
69 | 66 | ||
70 | __alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm); | 67 | void __hyp_text __kvm_flush_vm_context(void) |
71 | |||
72 | static void __hyp_text __tlb_flush_vm_context(void) | ||
73 | { | 68 | { |
74 | dsb(ishst); | 69 | dsb(ishst); |
75 | asm volatile("tlbi alle1is \n" | 70 | asm volatile("tlbi alle1is \n" |
76 | "ic ialluis ": : ); | 71 | "ic ialluis ": : ); |
77 | dsb(ish); | 72 | dsb(ish); |
78 | } | 73 | } |
79 | |||
80 | __alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void); | ||
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 898c0e6aedd4..da6a8cfa54a0 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c | |||
@@ -231,3 +231,15 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) | |||
231 | else | 231 | else |
232 | inject_undef64(vcpu); | 232 | inject_undef64(vcpu); |
233 | } | 233 | } |
234 | |||
235 | /** | ||
236 | * kvm_inject_vabt - inject an async abort / SError into the guest | ||
237 | * @vcpu: The VCPU to receive the exception | ||
238 | * | ||
239 | * It is assumed that this code is called from the VCPU thread and that the | ||
240 | * VCPU therefore is not currently executing guest code. | ||
241 | */ | ||
242 | void kvm_inject_vabt(struct kvm_vcpu *vcpu) | ||
243 | { | ||
244 | vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE); | ||
245 | } | ||
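Note: kvm_inject_vabt() is what turns the external-abort detection points in this series into guest-visible SErrors. A typical call site, mirroring the kvm_handle_guest_abort() hunk earlier in this diff:

    if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
            kvm_inject_vabt(vcpu);  /* pend HCR_EL2.VSE for the guest */
            return 1;               /* nothing more for the host to do */
    }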
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 19b698ef3336..002f0922cd92 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -20,9 +20,11 @@ | |||
20 | #include <linux/kvm.h> | 20 | #include <linux/kvm.h> |
21 | #include <linux/irqreturn.h> | 21 | #include <linux/irqreturn.h> |
22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
23 | #include <linux/static_key.h> | ||
23 | #include <linux/types.h> | 24 | #include <linux/types.h> |
24 | #include <kvm/iodev.h> | 25 | #include <kvm/iodev.h> |
25 | #include <linux/list.h> | 26 | #include <linux/list.h> |
27 | #include <linux/jump_label.h> | ||
26 | 28 | ||
27 | #define VGIC_V3_MAX_CPUS 255 | 29 | #define VGIC_V3_MAX_CPUS 255 |
28 | #define VGIC_V2_MAX_CPUS 8 | 30 | #define VGIC_V2_MAX_CPUS 8 |
@@ -49,6 +51,9 @@ struct vgic_global { | |||
49 | /* Physical address of vgic virtual cpu interface */ | 51 | /* Physical address of vgic virtual cpu interface */ |
50 | phys_addr_t vcpu_base; | 52 | phys_addr_t vcpu_base; |
51 | 53 | ||
54 | /* GICV mapping */ | ||
55 | void __iomem *vcpu_base_va; | ||
56 | |||
52 | /* virtual control interface mapping */ | 57 | /* virtual control interface mapping */ |
53 | void __iomem *vctrl_base; | 58 | void __iomem *vctrl_base; |
54 | 59 | ||
@@ -63,6 +68,9 @@ struct vgic_global { | |||
63 | 68 | ||
64 | /* Only needed for the legacy KVM_CREATE_IRQCHIP */ | 69 | /* Only needed for the legacy KVM_CREATE_IRQCHIP */ |
65 | bool can_emulate_gicv2; | 70 | bool can_emulate_gicv2; |
71 | |||
72 | /* GIC system register CPU interface */ | ||
73 | struct static_key_false gicv3_cpuif; | ||
66 | }; | 74 | }; |
67 | 75 | ||
68 | extern struct vgic_global kvm_vgic_global_state; | 76 | extern struct vgic_global kvm_vgic_global_state; |
@@ -217,7 +225,6 @@ struct vgic_v2_cpu_if { | |||
217 | }; | 225 | }; |
218 | 226 | ||
219 | struct vgic_v3_cpu_if { | 227 | struct vgic_v3_cpu_if { |
220 | #ifdef CONFIG_KVM_ARM_VGIC_V3 | ||
221 | u32 vgic_hcr; | 228 | u32 vgic_hcr; |
222 | u32 vgic_vmcr; | 229 | u32 vgic_vmcr; |
223 | u32 vgic_sre; /* Restored only, change ignored */ | 230 | u32 vgic_sre; /* Restored only, change ignored */ |
@@ -227,7 +234,6 @@ struct vgic_v3_cpu_if { | |||
227 | u32 vgic_ap0r[4]; | 234 | u32 vgic_ap0r[4]; |
228 | u32 vgic_ap1r[4]; | 235 | u32 vgic_ap1r[4]; |
229 | u64 vgic_lr[VGIC_V3_MAX_LRS]; | 236 | u64 vgic_lr[VGIC_V3_MAX_LRS]; |
230 | #endif | ||
231 | }; | 237 | }; |
232 | 238 | ||
233 | struct vgic_cpu { | 239 | struct vgic_cpu { |
@@ -265,6 +271,8 @@ struct vgic_cpu { | |||
265 | bool lpis_enabled; | 271 | bool lpis_enabled; |
266 | }; | 272 | }; |
267 | 273 | ||
274 | extern struct static_key_false vgic_v2_cpuif_trap; | ||
275 | |||
268 | int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); | 276 | int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); |
269 | void kvm_vgic_early_init(struct kvm *kvm); | 277 | void kvm_vgic_early_init(struct kvm *kvm); |
270 | int kvm_vgic_create(struct kvm *kvm, u32 type); | 278 | int kvm_vgic_create(struct kvm *kvm, u32 type); |
@@ -294,13 +302,7 @@ bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); | |||
294 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); | 302 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); |
295 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); | 303 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); |
296 | 304 | ||
297 | #ifdef CONFIG_KVM_ARM_VGIC_V3 | ||
298 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); | 305 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); |
299 | #else | ||
300 | static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg) | ||
301 | { | ||
302 | } | ||
303 | #endif | ||
304 | 306 | ||
305 | /** | 307 | /** |
306 | * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW | 308 | * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW |
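Note: the new gicv3_cpuif static key replaces the hyp_alternate_select() based dispatch. It is flipped once at probe time and queried with a patched branch on every world switch; both sites appear elsewhere in this diff:

    /* probe time (vgic-init.c) */
    static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);

    /* world switch (hyp/switch.c) */
    if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
            __vgic_v3_save_state(vcpu);
    else
            __vgic_v2_save_state(vcpu);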
diff --git a/arch/arm64/kvm/emulate.c b/virt/kvm/arm/aarch32.c index f87d8fbaa48d..528af4b2d09e 100644 --- a/arch/arm64/kvm/emulate.c +++ b/virt/kvm/arm/aarch32.c | |||
@@ -22,8 +22,13 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <asm/esr.h> | ||
26 | #include <asm/kvm_emulate.h> | 25 | #include <asm/kvm_emulate.h> |
26 | #include <asm/kvm_hyp.h> | ||
27 | |||
28 | #ifndef CONFIG_ARM64 | ||
29 | #define COMPAT_PSR_T_BIT PSR_T_BIT | ||
30 | #define COMPAT_PSR_IT_MASK PSR_IT_MASK | ||
31 | #endif | ||
27 | 32 | ||
28 | /* | 33 | /* |
29 | * stolen from arch/arm/kernel/opcodes.c | 34 | * stolen from arch/arm/kernel/opcodes.c |
@@ -52,16 +57,6 @@ static const unsigned short cc_map[16] = { | |||
52 | 0 /* NV */ | 57 | 0 /* NV */ |
53 | }; | 58 | }; |
54 | 59 | ||
55 | static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) | ||
56 | { | ||
57 | u32 esr = kvm_vcpu_get_hsr(vcpu); | ||
58 | |||
59 | if (esr & ESR_ELx_CV) | ||
60 | return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT; | ||
61 | |||
62 | return -1; | ||
63 | } | ||
64 | |||
65 | /* | 60 | /* |
66 | * Check if a trapped instruction should have been executed or not. | 61 | * Check if a trapped instruction should have been executed or not. |
67 | */ | 62 | */ |
@@ -114,15 +109,13 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu) | |||
114 | * | 109 | * |
115 | * IT[7:0] -> CPSR[26:25],CPSR[15:10] | 110 | * IT[7:0] -> CPSR[26:25],CPSR[15:10] |
116 | */ | 111 | */ |
117 | static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) | 112 | static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) |
118 | { | 113 | { |
119 | unsigned long itbits, cond; | 114 | unsigned long itbits, cond; |
120 | unsigned long cpsr = *vcpu_cpsr(vcpu); | 115 | unsigned long cpsr = *vcpu_cpsr(vcpu); |
121 | bool is_arm = !(cpsr & COMPAT_PSR_T_BIT); | 116 | bool is_arm = !(cpsr & COMPAT_PSR_T_BIT); |
122 | 117 | ||
123 | BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK)); | 118 | if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK)) |
124 | |||
125 | if (!(cpsr & COMPAT_PSR_IT_MASK)) | ||
126 | return; | 119 | return; |
127 | 120 | ||
128 | cond = (cpsr & 0xe000) >> 13; | 121 | cond = (cpsr & 0xe000) >> 13; |
@@ -146,7 +139,7 @@ static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) | |||
146 | * kvm_skip_instr - skip a trapped instruction and proceed to the next | 139 | * kvm_skip_instr - skip a trapped instruction and proceed to the next |
147 | * @vcpu: The vcpu pointer | 140 | * @vcpu: The vcpu pointer |
148 | */ | 141 | */ |
149 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) | 142 | void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) |
150 | { | 143 | { |
151 | bool is_thumb; | 144 | bool is_thumb; |
152 | 145 | ||
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 4309b60ebf17..27a1f6341d41 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
@@ -445,7 +445,7 @@ int kvm_timer_hyp_init(void) | |||
445 | if (err) { | 445 | if (err) { |
446 | kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n", | 446 | kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n", |
447 | host_vtimer_irq, err); | 447 | host_vtimer_irq, err); |
448 | goto out; | 448 | return err; |
449 | } | 449 | } |
450 | 450 | ||
451 | kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); | 451 | kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); |
@@ -453,10 +453,6 @@ int kvm_timer_hyp_init(void) | |||
453 | cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, | 453 | cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, |
454 | "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu, | 454 | "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu, |
455 | kvm_timer_dying_cpu); | 455 | kvm_timer_dying_cpu); |
456 | goto out; | ||
457 | out_free: | ||
458 | free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus()); | ||
459 | out: | ||
460 | return err; | 456 | return err; |
461 | } | 457 | } |
462 | 458 | ||
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c index 7cffd9338c49..c8aeb7b91ec8 100644 --- a/virt/kvm/arm/hyp/vgic-v2-sr.c +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/irqchip/arm-gic.h> | 19 | #include <linux/irqchip/arm-gic.h> |
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | 21 | ||
22 | #include <asm/kvm_emulate.h> | ||
22 | #include <asm/kvm_hyp.h> | 23 | #include <asm/kvm_hyp.h> |
23 | 24 | ||
24 | static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, | 25 | static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, |
@@ -167,3 +168,59 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) | |||
167 | writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); | 168 | writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); |
168 | vcpu->arch.vgic_cpu.live_lrs = live_lrs; | 169 | vcpu->arch.vgic_cpu.live_lrs = live_lrs; |
169 | } | 170 | } |
171 | |||
172 | #ifdef CONFIG_ARM64 | ||
173 | /* | ||
174 | * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the | ||
175 | * guest. | ||
176 | * | ||
177 | * @vcpu: the offending vcpu | ||
178 | * | ||
179 | * Returns: | ||
180 | * 1: GICV access successfully performed | ||
181 | * 0: Not a GICV access | ||
182 | * -1: Illegal GICV access | ||
183 | */ | ||
184 | int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) | ||
185 | { | ||
186 | struct kvm *kvm = kern_hyp_va(vcpu->kvm); | ||
187 | struct vgic_dist *vgic = &kvm->arch.vgic; | ||
188 | phys_addr_t fault_ipa; | ||
189 | void __iomem *addr; | ||
190 | int rd; | ||
191 | |||
192 | /* Build the full address */ | ||
193 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); | ||
194 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0); | ||
195 | |||
196 | /* If not for GICV, move on */ | ||
197 | if (fault_ipa < vgic->vgic_cpu_base || | ||
198 | fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE)) | ||
199 | return 0; | ||
200 | |||
201 | /* Reject anything but a 32bit access */ | ||
202 | if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) | ||
203 | return -1; | ||
204 | |||
205 | /* Not aligned? Don't bother */ | ||
206 | if (fault_ipa & 3) | ||
207 | return -1; | ||
208 | |||
209 | rd = kvm_vcpu_dabt_get_rd(vcpu); | ||
210 | addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va); | ||
211 | addr += fault_ipa - vgic->vgic_cpu_base; | ||
212 | |||
213 | if (kvm_vcpu_dabt_iswrite(vcpu)) { | ||
214 | u32 data = vcpu_data_guest_to_host(vcpu, | ||
215 | vcpu_get_reg(vcpu, rd), | ||
216 | sizeof(u32)); | ||
217 | writel_relaxed(data, addr); | ||
218 | } else { | ||
219 | u32 data = readl_relaxed(addr); | ||
220 | vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data, | ||
221 | sizeof(u32))); | ||
222 | } | ||
223 | |||
224 | return 1; | ||
225 | } | ||
226 | #endif | ||
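Note: a worked example of the address build in __vgic_v2_perform_cpuif_access(), using a hypothetical GICV base of 0x3fff0000: the stage-2 fault reports the IPA at page granularity, and HFAR restores the low 12 bits:

    /* Assumed layout: vgic->vgic_cpu_base == 0x3fff0000, and the guest
     * reads its interrupt-acknowledge register at GICV base + 0x0c.   */
    fault_ipa  = kvm_vcpu_get_fault_ipa(vcpu);              /* 0x3fff0000 */
    fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);  /* | 0x00c    */

The host then performs the same aligned 32-bit access on its own GICV mapping at vcpu_base_va plus that offset and completes the guest's register read or write, which is what makes the proxying transparent.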
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c index 5f8f80b4a224..3947095cc0a1 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/virt/kvm/arm/hyp/vgic-v3-sr.c | |||
@@ -24,19 +24,6 @@ | |||
24 | #define vtr_to_max_lr_idx(v) ((v) & 0xf) | 24 | #define vtr_to_max_lr_idx(v) ((v) & 0xf) |
25 | #define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) | 25 | #define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) |
26 | 26 | ||
27 | #define read_gicreg(r) \ | ||
28 | ({ \ | ||
29 | u64 reg; \ | ||
30 | asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \ | ||
31 | reg; \ | ||
32 | }) | ||
33 | |||
34 | #define write_gicreg(v,r) \ | ||
35 | do { \ | ||
36 | u64 __val = (v); \ | ||
37 | asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\ | ||
38 | } while (0) | ||
39 | |||
40 | static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) | 27 | static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) |
41 | { | 28 | { |
42 | switch (lr & 0xf) { | 29 | switch (lr & 0xf) { |
@@ -335,9 +322,7 @@ void __hyp_text __vgic_v3_init_lrs(void) | |||
335 | __gic_v3_set_lr(0, i); | 322 | __gic_v3_set_lr(0, i); |
336 | } | 323 | } |
337 | 324 | ||
338 | static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void) | 325 | u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void) |
339 | { | 326 | { |
340 | return read_gicreg(ICH_VTR_EL2); | 327 | return read_gicreg(ICH_VTR_EL2); |
341 | } | 328 | } |
342 | |||
343 | __alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void); | ||
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index a027569facfa..6e9c40eea208 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c | |||
@@ -423,6 +423,14 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) | |||
423 | if (!kvm_arm_support_pmu_v3()) | 423 | if (!kvm_arm_support_pmu_v3()) |
424 | return -ENODEV; | 424 | return -ENODEV; |
425 | 425 | ||
426 | /* | ||
427 | * We currently require an in-kernel VGIC to use the PMU emulation, | ||
428 | * because we do not support forwarding PMU overflow interrupts to | ||
429 | * userspace yet. | ||
430 | */ | ||
431 | if (!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm)) | ||
432 | return -ENODEV; | ||
433 | |||
426 | if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) || | 434 | if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) || |
427 | !kvm_arm_pmu_irq_initialized(vcpu)) | 435 | !kvm_arm_pmu_irq_initialized(vcpu)) |
428 | return -ENXIO; | 436 | return -ENXIO; |
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 83777c1cbae0..8cebfbc19e90 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c | |||
@@ -405,6 +405,10 @@ int kvm_vgic_hyp_init(void) | |||
405 | break; | 405 | break; |
406 | case GIC_V3: | 406 | case GIC_V3: |
407 | ret = vgic_v3_probe(gic_kvm_info); | 407 | ret = vgic_v3_probe(gic_kvm_info); |
408 | if (!ret) { | ||
409 | static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif); | ||
410 | kvm_info("GIC system register CPU interface enabled\n"); | ||
411 | } | ||
408 | break; | 412 | break; |
409 | default: | 413 | default: |
410 | ret = -ENODEV; | 414 | ret = -ENODEV; |
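On a successful GICv3 probe the code now flips a static key, so hot paths can test for the system-register CPU interface with a patched branch instead of a load and compare. The pattern in isolation, as a kernel-only sketch (the demo key stands in for kvm_vgic_global_state.gicv3_cpuif, which is embedded in a struct and therefore initialized with STATIC_KEY_FALSE_INIT, as a later hunk shows):

    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(gicv3_cpuif_demo);

    /* probe path: runs once, patches every branch site */
    static void probe(void)
    {
            static_branch_enable(&gicv3_cpuif_demo);
    }

    /* hot path: compiles to a straight-line jump until the key flips */
    static void world_switch(void)
    {
            if (static_branch_unlikely(&gicv3_cpuif_demo)) {
                    /* GICv3 system-register save/restore goes here */
            }
    }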
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c index b31a51a14efb..d918dcf26a5a 100644 --- a/virt/kvm/arm/vgic/vgic-irqfd.c +++ b/virt/kvm/arm/vgic/vgic-irqfd.c | |||
@@ -46,15 +46,9 @@ static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e, | |||
46 | * @ue: user api routing entry handle | 46 | * @ue: user api routing entry handle |
47 | * return 0 on success, -EINVAL on errors. | 47 | * return 0 on success, -EINVAL on errors. |
48 | */ | 48 | */ |
49 | #ifdef KVM_CAP_X2APIC_API | ||
50 | int kvm_set_routing_entry(struct kvm *kvm, | 49 | int kvm_set_routing_entry(struct kvm *kvm, |
51 | struct kvm_kernel_irq_routing_entry *e, | 50 | struct kvm_kernel_irq_routing_entry *e, |
52 | const struct kvm_irq_routing_entry *ue) | 51 | const struct kvm_irq_routing_entry *ue) |
53 | #else | ||
54 | /* Remove this version and the ifdefery once merged into 4.8 */ | ||
55 | int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e, | ||
56 | const struct kvm_irq_routing_entry *ue) | ||
57 | #endif | ||
58 | { | 52 | { |
59 | int r = -EINVAL; | 53 | int r = -EINVAL; |
60 | 54 | ||
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index 1813f93b5cde..ce1f4ed9daf4 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c | |||
@@ -71,7 +71,6 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | |||
71 | addr_ptr = &vgic->vgic_cpu_base; | 71 | addr_ptr = &vgic->vgic_cpu_base; |
72 | alignment = SZ_4K; | 72 | alignment = SZ_4K; |
73 | break; | 73 | break; |
74 | #ifdef CONFIG_KVM_ARM_VGIC_V3 | ||
75 | case KVM_VGIC_V3_ADDR_TYPE_DIST: | 74 | case KVM_VGIC_V3_ADDR_TYPE_DIST: |
76 | type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; | 75 | type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; |
77 | addr_ptr = &vgic->vgic_dist_base; | 76 | addr_ptr = &vgic->vgic_dist_base; |
@@ -82,7 +81,6 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | |||
82 | addr_ptr = &vgic->vgic_redist_base; | 81 | addr_ptr = &vgic->vgic_redist_base; |
83 | alignment = SZ_64K; | 82 | alignment = SZ_64K; |
84 | break; | 83 | break; |
85 | #endif | ||
86 | default: | 84 | default: |
87 | r = -ENODEV; | 85 | r = -ENODEV; |
88 | goto out; | 86 | goto out; |
@@ -219,52 +217,65 @@ int kvm_register_vgic_device(unsigned long type) | |||
219 | ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops, | 217 | ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops, |
220 | KVM_DEV_TYPE_ARM_VGIC_V2); | 218 | KVM_DEV_TYPE_ARM_VGIC_V2); |
221 | break; | 219 | break; |
222 | #ifdef CONFIG_KVM_ARM_VGIC_V3 | ||
223 | case KVM_DEV_TYPE_ARM_VGIC_V3: | 220 | case KVM_DEV_TYPE_ARM_VGIC_V3: |
224 | ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops, | 221 | ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops, |
225 | KVM_DEV_TYPE_ARM_VGIC_V3); | 222 | KVM_DEV_TYPE_ARM_VGIC_V3); |
223 | |||
224 | #ifdef CONFIG_KVM_ARM_VGIC_V3_ITS | ||
226 | if (ret) | 225 | if (ret) |
227 | break; | 226 | break; |
228 | ret = kvm_vgic_register_its_device(); | 227 | ret = kvm_vgic_register_its_device(); |
229 | break; | ||
230 | #endif | 228 | #endif |
229 | break; | ||
231 | } | 230 | } |
232 | 231 | ||
233 | return ret; | 232 | return ret; |
234 | } | 233 | } |
235 | 234 | ||
236 | /** vgic_attr_regs_access: allows user space to read/write VGIC registers | 235 | struct vgic_reg_attr { |
237 | * | 236 | struct kvm_vcpu *vcpu; |
238 | * @dev: kvm device handle | ||
239 | * @attr: kvm device attribute | ||
240 | * @reg: address the value is read or written | ||
241 | * @is_write: write flag | ||
242 | * | ||
243 | */ | ||
244 | static int vgic_attr_regs_access(struct kvm_device *dev, | ||
245 | struct kvm_device_attr *attr, | ||
246 | u32 *reg, bool is_write) | ||
247 | { | ||
248 | gpa_t addr; | 237 | gpa_t addr; |
249 | int cpuid, ret, c; | 238 | }; |
250 | struct kvm_vcpu *vcpu, *tmp_vcpu; | 239 | |
251 | int vcpu_lock_idx = -1; | 240 | static int parse_vgic_v2_attr(struct kvm_device *dev, |
241 | struct kvm_device_attr *attr, | ||
242 | struct vgic_reg_attr *reg_attr) | ||
243 | { | ||
244 | int cpuid; | ||
252 | 245 | ||
253 | cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> | 246 | cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> |
254 | KVM_DEV_ARM_VGIC_CPUID_SHIFT; | 247 | KVM_DEV_ARM_VGIC_CPUID_SHIFT; |
255 | vcpu = kvm_get_vcpu(dev->kvm, cpuid); | ||
256 | addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
257 | 248 | ||
258 | mutex_lock(&dev->kvm->lock); | 249 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) |
250 | return -EINVAL; | ||
259 | 251 | ||
260 | ret = vgic_init(dev->kvm); | 252 | reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid); |
261 | if (ret) | 253 | reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; |
262 | goto out; | ||
263 | 254 | ||
264 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { | 255 | return 0; |
265 | ret = -EINVAL; | 256 | } |
266 | goto out; | 257 | |
258 | /* unlocks vcpus from @vcpu_lock_idx and smaller */ | ||
259 | static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) | ||
260 | { | ||
261 | struct kvm_vcpu *tmp_vcpu; | ||
262 | |||
263 | for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { | ||
264 | tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); | ||
265 | mutex_unlock(&tmp_vcpu->mutex); | ||
267 | } | 266 | } |
267 | } | ||
268 | |||
269 | static void unlock_all_vcpus(struct kvm *kvm) | ||
270 | { | ||
271 | unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); | ||
272 | } | ||
273 | |||
274 | /* Returns true if all vcpus were locked, false otherwise */ | ||
275 | static bool lock_all_vcpus(struct kvm *kvm) | ||
276 | { | ||
277 | struct kvm_vcpu *tmp_vcpu; | ||
278 | int c; | ||
268 | 279 | ||
269 | /* | 280 | /* |
270 | * Any time a vcpu is run, vcpu_load is called which tries to grab the | 281 | * Any time a vcpu is run, vcpu_load is called which tries to grab the |
@@ -272,11 +283,49 @@ static int vgic_attr_regs_access(struct kvm_device *dev, | |||
272 | * that no other VCPUs are run and fiddle with the vgic state while we | 283 | * that no other VCPUs are run and fiddle with the vgic state while we |
273 | * access it. | 284 | * access it. |
274 | */ | 285 | */ |
275 | ret = -EBUSY; | 286 | kvm_for_each_vcpu(c, tmp_vcpu, kvm) { |
276 | kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) { | 287 | if (!mutex_trylock(&tmp_vcpu->mutex)) { |
277 | if (!mutex_trylock(&tmp_vcpu->mutex)) | 288 | unlock_vcpus(kvm, c - 1); |
278 | goto out; | 289 | return false; |
279 | vcpu_lock_idx = c; | 290 | } |
291 | } | ||
292 | |||
293 | return true; | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * vgic_attr_regs_access_v2 - allows user space to access VGIC v2 state | ||
298 | * | ||
299 | * @dev: kvm device handle | ||
300 | * @attr: kvm device attribute | ||
301 | * @reg: address the value is read or written | ||
302 | * @is_write: true if userspace is writing a register | ||
303 | */ | ||
304 | static int vgic_attr_regs_access_v2(struct kvm_device *dev, | ||
305 | struct kvm_device_attr *attr, | ||
306 | u32 *reg, bool is_write) | ||
307 | { | ||
308 | struct vgic_reg_attr reg_attr; | ||
309 | gpa_t addr; | ||
310 | struct kvm_vcpu *vcpu; | ||
311 | int ret; | ||
312 | |||
313 | ret = parse_vgic_v2_attr(dev, attr, ®_attr); | ||
314 | if (ret) | ||
315 | return ret; | ||
316 | |||
317 | vcpu = reg_attr.vcpu; | ||
318 | addr = reg_attr.addr; | ||
319 | |||
320 | mutex_lock(&dev->kvm->lock); | ||
321 | |||
322 | ret = vgic_init(dev->kvm); | ||
323 | if (ret) | ||
324 | goto out; | ||
325 | |||
326 | if (!lock_all_vcpus(dev->kvm)) { | ||
327 | ret = -EBUSY; | ||
328 | goto out; | ||
280 | } | 329 | } |
281 | 330 | ||
282 | switch (attr->group) { | 331 | switch (attr->group) { |
@@ -291,18 +340,12 @@ static int vgic_attr_regs_access(struct kvm_device *dev, | |||
291 | break; | 340 | break; |
292 | } | 341 | } |
293 | 342 | ||
343 | unlock_all_vcpus(dev->kvm); | ||
294 | out: | 344 | out: |
295 | for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { | ||
296 | tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx); | ||
297 | mutex_unlock(&tmp_vcpu->mutex); | ||
298 | } | ||
299 | |||
300 | mutex_unlock(&dev->kvm->lock); | 345 | mutex_unlock(&dev->kvm->lock); |
301 | return ret; | 346 | return ret; |
302 | } | 347 | } |
303 | 348 | ||
304 | /* V2 ops */ | ||
305 | |||
306 | static int vgic_v2_set_attr(struct kvm_device *dev, | 349 | static int vgic_v2_set_attr(struct kvm_device *dev, |
307 | struct kvm_device_attr *attr) | 350 | struct kvm_device_attr *attr) |
308 | { | 351 | { |
@@ -321,7 +364,7 @@ static int vgic_v2_set_attr(struct kvm_device *dev, | |||
321 | if (get_user(reg, uaddr)) | 364 | if (get_user(reg, uaddr)) |
322 | return -EFAULT; | 365 | return -EFAULT; |
323 | 366 | ||
324 | return vgic_attr_regs_access(dev, attr, ®, true); | 367 | return vgic_attr_regs_access_v2(dev, attr, ®, true); |
325 | } | 368 | } |
326 | } | 369 | } |
327 | 370 | ||
@@ -343,7 +386,7 @@ static int vgic_v2_get_attr(struct kvm_device *dev, | |||
343 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | 386 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; |
344 | u32 reg = 0; | 387 | u32 reg = 0; |
345 | 388 | ||
346 | ret = vgic_attr_regs_access(dev, attr, ®, false); | 389 | ret = vgic_attr_regs_access_v2(dev, attr, ®, false); |
347 | if (ret) | 390 | if (ret) |
348 | return ret; | 391 | return ret; |
349 | return put_user(reg, uaddr); | 392 | return put_user(reg, uaddr); |
@@ -387,10 +430,6 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = { | |||
387 | .has_attr = vgic_v2_has_attr, | 430 | .has_attr = vgic_v2_has_attr, |
388 | }; | 431 | }; |
389 | 432 | ||
390 | /* V3 ops */ | ||
391 | |||
392 | #ifdef CONFIG_KVM_ARM_VGIC_V3 | ||
393 | |||
394 | static int vgic_v3_set_attr(struct kvm_device *dev, | 433 | static int vgic_v3_set_attr(struct kvm_device *dev, |
395 | struct kvm_device_attr *attr) | 434 | struct kvm_device_attr *attr) |
396 | { | 435 | { |
@@ -433,5 +472,3 @@ struct kvm_device_ops kvm_arm_vgic_v3_ops = { | |||
433 | .get_attr = vgic_v3_get_attr, | 472 | .get_attr = vgic_v3_get_attr, |
434 | .has_attr = vgic_v3_has_attr, | 473 | .has_attr = vgic_v3_has_attr, |
435 | }; | 474 | }; |
436 | |||
437 | #endif /* CONFIG_KVM_ARM_VGIC_V3 */ | ||
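The net effect of this refactor: the old monolithic vgic_attr_regs_access() is split into attribute parsing (parse_vgic_v2_attr), a reusable lock_all_vcpus()/unlock_all_vcpus() pair, and a v2-specific access routine, leaving room for an equivalent v3 path later. The convention the helpers establish, restated as one hedged kernel-style sketch (the wrapper name is invented):

    /*
     * Hedged sketch: any userspace access that must see a quiescent VGIC
     * takes kvm->lock, then trylocks every vcpu mutex so no vcpu can
     * enter the guest meanwhile; a running vcpu means -EBUSY.
     */
    static int with_all_vcpus_locked(struct kvm *kvm,
                                     int (*body)(struct kvm *kvm))
    {
            int ret;

            mutex_lock(&kvm->lock);
            if (!lock_all_vcpus(kvm)) {
                    mutex_unlock(&kvm->lock);
                    return -EBUSY;
            }

            ret = body(kvm);

            unlock_all_vcpus(kvm);
            mutex_unlock(&kvm->lock);
            return ret;
    }

Trylock rather than lock is deliberate: a vcpu that is currently running holds its mutex via vcpu_load, and blocking on it here could wait indefinitely, so the access path reports -EBUSY instead.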
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 90d81811fdda..0d3c76a4208b 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include "vgic-mmio.h" | 23 | #include "vgic-mmio.h" |
24 | 24 | ||
25 | /* extract @num bytes at @offset bytes offset in data */ | 25 | /* extract @num bytes at @offset bytes offset in data */ |
26 | unsigned long extract_bytes(unsigned long data, unsigned int offset, | 26 | unsigned long extract_bytes(u64 data, unsigned int offset, |
27 | unsigned int num) | 27 | unsigned int num) |
28 | { | 28 | { |
29 | return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0); | 29 | return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0); |
@@ -42,6 +42,7 @@ u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len, | |||
42 | return reg | ((u64)val << lower); | 42 | return reg | ((u64)val << lower); |
43 | } | 43 | } |
44 | 44 | ||
45 | #ifdef CONFIG_KVM_ARM_VGIC_V3_ITS | ||
45 | bool vgic_has_its(struct kvm *kvm) | 46 | bool vgic_has_its(struct kvm *kvm) |
46 | { | 47 | { |
47 | struct vgic_dist *dist = &kvm->arch.vgic; | 48 | struct vgic_dist *dist = &kvm->arch.vgic; |
@@ -51,6 +52,7 @@ bool vgic_has_its(struct kvm *kvm) | |||
51 | 52 | ||
52 | return dist->has_its; | 53 | return dist->has_its; |
53 | } | 54 | } |
55 | #endif | ||
54 | 56 | ||
55 | static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu, | 57 | static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu, |
56 | gpa_t addr, unsigned int len) | 58 | gpa_t addr, unsigned int len) |
@@ -179,7 +181,7 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu, | |||
179 | int target_vcpu_id = vcpu->vcpu_id; | 181 | int target_vcpu_id = vcpu->vcpu_id; |
180 | u64 value; | 182 | u64 value; |
181 | 183 | ||
182 | value = (mpidr & GENMASK(23, 0)) << 32; | 184 | value = (u64)(mpidr & GENMASK(23, 0)) << 32; |
183 | value |= ((target_vcpu_id & 0xffff) << 8); | 185 | value |= ((target_vcpu_id & 0xffff) << 8); |
184 | if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1) | 186 | if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1) |
185 | value |= GICR_TYPER_LAST; | 187 | value |= GICR_TYPER_LAST; |
@@ -609,7 +611,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg) | |||
609 | bool broadcast; | 611 | bool broadcast; |
610 | 612 | ||
611 | sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; | 613 | sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; |
612 | broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); | 614 | broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); |
613 | target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT; | 615 | target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT; |
614 | mpidr = SGI_AFFINITY_LEVEL(reg, 3); | 616 | mpidr = SGI_AFFINITY_LEVEL(reg, 3); |
615 | mpidr |= SGI_AFFINITY_LEVEL(reg, 2); | 617 | mpidr |= SGI_AFFINITY_LEVEL(reg, 2); |
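Apart from the ITS #ifdef, the hunks in this file are 32-bit correctness fixes: extract_bytes() now takes a u64 so a 64-bit register value is not truncated on entry, the MPIDR affinity field is widened with a (u64) cast before the 32-place shift, and BIT_ULL() replaces BIT() because the routing-mode bit of ICC_SGI1R sits above bit 31. On a host where unsigned long is 32 bits, each original expression quietly lost the high word or was outright undefined. A standalone demo of the fixed forms:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t mpidr = 0x00abcdef;    /* affinity bits 23:0 */

            /*
             * Widen to 64 bits *before* shifting. Without the cast, a
             * 32-bit value shifted by its own width is undefined
             * behaviour, and a 32-bit BIT(40) is just as broken, hence
             * the BIT_ULL() change.
             */
            uint64_t typer        = (uint64_t)(mpidr & 0xffffff) << 32;
            uint64_t routing_mode = 1ULL << 40;     /* BIT_ULL(40) */

            printf("typer        = 0x%016" PRIx64 "\n", typer);
            printf("routing_mode = 0x%016" PRIx64 "\n", routing_mode);
            return 0;
    }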
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 3bad3c5ed431..e18b30ddcdce 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
@@ -550,11 +550,9 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, | |||
550 | case VGIC_V2: | 550 | case VGIC_V2: |
551 | len = vgic_v2_init_dist_iodev(io_device); | 551 | len = vgic_v2_init_dist_iodev(io_device); |
552 | break; | 552 | break; |
553 | #ifdef CONFIG_KVM_ARM_VGIC_V3 | ||
554 | case VGIC_V3: | 553 | case VGIC_V3: |
555 | len = vgic_v3_init_dist_iodev(io_device); | 554 | len = vgic_v3_init_dist_iodev(io_device); |
556 | break; | 555 | break; |
557 | #endif | ||
558 | default: | 556 | default: |
559 | BUG_ON(1); | 557 | BUG_ON(1); |
560 | } | 558 | } |
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index 0b3ecf9d100e..4c34d39d44a0 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h | |||
@@ -96,7 +96,7 @@ unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len); | |||
96 | void vgic_data_host_to_mmio_bus(void *buf, unsigned int len, | 96 | void vgic_data_host_to_mmio_bus(void *buf, unsigned int len, |
97 | unsigned long data); | 97 | unsigned long data); |
98 | 98 | ||
99 | unsigned long extract_bytes(unsigned long data, unsigned int offset, | 99 | unsigned long extract_bytes(u64 data, unsigned int offset, |
100 | unsigned int num); | 100 | unsigned int num); |
101 | 101 | ||
102 | u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len, | 102 | u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len, |
@@ -162,12 +162,10 @@ unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); | |||
162 | 162 | ||
163 | unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev); | 163 | unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev); |
164 | 164 | ||
165 | #ifdef CONFIG_KVM_ARM_VGIC_V3 | ||
166 | u64 vgic_sanitise_outer_cacheability(u64 reg); | 165 | u64 vgic_sanitise_outer_cacheability(u64 reg); |
167 | u64 vgic_sanitise_inner_cacheability(u64 reg); | 166 | u64 vgic_sanitise_inner_cacheability(u64 reg); |
168 | u64 vgic_sanitise_shareability(u64 reg); | 167 | u64 vgic_sanitise_shareability(u64 reg); |
169 | u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift, | 168 | u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift, |
170 | u64 (*sanitise_fn)(u64)); | 169 | u64 (*sanitise_fn)(u64)); |
171 | #endif | ||
172 | 170 | ||
173 | #endif | 171 | #endif |
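Here, as in vgic-kvm-device.c, vgic-mmio.c and vgic.h below, the CONFIG_KVM_ARM_VGIC_V3 guards simply vanish: with the GICv3 code now built on both architectures, the option has no reason to exist, and only the ITS remains compile-time optional behind the new CONFIG_KVM_ARM_VGIC_V3_ITS. The header idiom that stays for the ITS (and that the removed V3 stubs in vgic.h used) is the usual compiled-out-feature pattern; schematically, with an invented option and function name:

    /*
     * Schematic only -- CONFIG_FEATURE_X and feature_x_init() are
     * invented. When a feature can be compiled out, the header supplies
     * static inline stubs so callers need no #ifdefs of their own; the
     * stub returns the same error the real code uses for "not available".
     */
    #ifdef CONFIG_FEATURE_X
    int feature_x_init(struct kvm *kvm);
    #else
    static inline int feature_x_init(struct kvm *kvm)
    {
            return -ENODEV;
    }
    #endif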
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 0bf6709d1006..0a063af40565 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -278,12 +278,14 @@ int vgic_v2_map_resources(struct kvm *kvm) | |||
278 | goto out; | 278 | goto out; |
279 | } | 279 | } |
280 | 280 | ||
281 | ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base, | 281 | if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) { |
282 | kvm_vgic_global_state.vcpu_base, | 282 | ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base, |
283 | KVM_VGIC_V2_CPU_SIZE, true); | 283 | kvm_vgic_global_state.vcpu_base, |
284 | if (ret) { | 284 | KVM_VGIC_V2_CPU_SIZE, true); |
285 | kvm_err("Unable to remap VGIC CPU to VCPU\n"); | 285 | if (ret) { |
286 | goto out; | 286 | kvm_err("Unable to remap VGIC CPU to VCPU\n"); |
287 | goto out; | ||
288 | } | ||
287 | } | 289 | } |
288 | 290 | ||
289 | dist->ready = true; | 291 | dist->ready = true; |
@@ -294,6 +296,8 @@ out: | |||
294 | return ret; | 296 | return ret; |
295 | } | 297 | } |
296 | 298 | ||
299 | DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap); | ||
300 | |||
297 | /** | 301 | /** |
298 | * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT | 302 | * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT |
299 | * @node: pointer to the DT node | 303 | * @node: pointer to the DT node |
@@ -310,45 +314,51 @@ int vgic_v2_probe(const struct gic_kvm_info *info) | |||
310 | return -ENXIO; | 314 | return -ENXIO; |
311 | } | 315 | } |
312 | 316 | ||
313 | if (!PAGE_ALIGNED(info->vcpu.start)) { | 317 | if (!PAGE_ALIGNED(info->vcpu.start) || |
314 | kvm_err("GICV physical address 0x%llx not page aligned\n", | 318 | !PAGE_ALIGNED(resource_size(&info->vcpu))) { |
315 | (unsigned long long)info->vcpu.start); | 319 | kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n"); |
316 | return -ENXIO; | 320 | kvm_vgic_global_state.vcpu_base_va = ioremap(info->vcpu.start, |
317 | } | 321 | resource_size(&info->vcpu)); |
322 | if (!kvm_vgic_global_state.vcpu_base_va) { | ||
323 | kvm_err("Cannot ioremap GICV\n"); | ||
324 | return -ENOMEM; | ||
325 | } | ||
318 | 326 | ||
319 | if (!PAGE_ALIGNED(resource_size(&info->vcpu))) { | 327 | ret = create_hyp_io_mappings(kvm_vgic_global_state.vcpu_base_va, |
320 | kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n", | 328 | kvm_vgic_global_state.vcpu_base_va + resource_size(&info->vcpu), |
321 | (unsigned long long)resource_size(&info->vcpu), | 329 | info->vcpu.start); |
322 | PAGE_SIZE); | 330 | if (ret) { |
323 | return -ENXIO; | 331 | kvm_err("Cannot map GICV into hyp\n"); |
332 | goto out; | ||
333 | } | ||
334 | |||
335 | static_branch_enable(&vgic_v2_cpuif_trap); | ||
324 | } | 336 | } |
325 | 337 | ||
326 | kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start, | 338 | kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start, |
327 | resource_size(&info->vctrl)); | 339 | resource_size(&info->vctrl)); |
328 | if (!kvm_vgic_global_state.vctrl_base) { | 340 | if (!kvm_vgic_global_state.vctrl_base) { |
329 | kvm_err("Cannot ioremap GICH\n"); | 341 | kvm_err("Cannot ioremap GICH\n"); |
330 | return -ENOMEM; | 342 | ret = -ENOMEM; |
343 | goto out; | ||
331 | } | 344 | } |
332 | 345 | ||
333 | vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR); | 346 | vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR); |
334 | kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1; | 347 | kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1; |
335 | 348 | ||
336 | ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); | ||
337 | if (ret) { | ||
338 | kvm_err("Cannot register GICv2 KVM device\n"); | ||
339 | iounmap(kvm_vgic_global_state.vctrl_base); | ||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base, | 349 | ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base, |
344 | kvm_vgic_global_state.vctrl_base + | 350 | kvm_vgic_global_state.vctrl_base + |
345 | resource_size(&info->vctrl), | 351 | resource_size(&info->vctrl), |
346 | info->vctrl.start); | 352 | info->vctrl.start); |
347 | if (ret) { | 353 | if (ret) { |
348 | kvm_err("Cannot map VCTRL into hyp\n"); | 354 | kvm_err("Cannot map VCTRL into hyp\n"); |
349 | kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2); | 355 | goto out; |
350 | iounmap(kvm_vgic_global_state.vctrl_base); | 356 | } |
351 | return ret; | 357 | |
358 | ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); | ||
359 | if (ret) { | ||
360 | kvm_err("Cannot register GICv2 KVM device\n"); | ||
361 | goto out; | ||
352 | } | 362 | } |
353 | 363 | ||
354 | kvm_vgic_global_state.can_emulate_gicv2 = true; | 364 | kvm_vgic_global_state.can_emulate_gicv2 = true; |
@@ -359,4 +369,11 @@ int vgic_v2_probe(const struct gic_kvm_info *info) | |||
359 | kvm_info("vgic-v2@%llx\n", info->vctrl.start); | 369 | kvm_info("vgic-v2@%llx\n", info->vctrl.start); |
360 | 370 | ||
361 | return 0; | 371 | return 0; |
372 | out: | ||
373 | if (kvm_vgic_global_state.vctrl_base) | ||
374 | iounmap(kvm_vgic_global_state.vctrl_base); | ||
375 | if (kvm_vgic_global_state.vcpu_base_va) | ||
376 | iounmap(kvm_vgic_global_state.vcpu_base_va); | ||
377 | |||
378 | return ret; | ||
362 | } | 379 | } |
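Where vgic_v2_probe() used to fail outright on a GICV frame whose start or size was not page aligned, it now degrades gracefully: the frame is ioremapped, mapped into hyp, and the vgic_v2_cpuif_trap static key is enabled; the first hunk of this file then skips the guest stage-2 mapping, so every guest access to GICV traps and is emulated by the handler shown at the top of this section, at reduced performance, exactly as the new kvm_info message says. The underlying safety condition is simple; a runnable restatement with example addresses:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Hedged restatement of the new probe decision: the GICV frame can be
     * mapped straight into the guest only if it starts and ends on host
     * page boundaries; otherwise a stage-2 mapping would also expose
     * whatever neighbouring MMIO shares those pages, so KVM traps instead.
     */
    static bool gicv_direct_map_ok(uint64_t start, uint64_t size,
                                   uint64_t page_size)
    {
            return !(start & (page_size - 1)) && !(size & (page_size - 1));
    }

    int main(void)
    {
            /* An 8 KiB GICV at 0x2c02f000: fine with 4 KiB pages,
             * trap-and-emulate with 64 KiB pages. */
            printf("%d\n", gicv_direct_map_ok(0x2c02f000, 0x2000, 0x1000));
            printf("%d\n", gicv_direct_map_ok(0x2c02f000, 0x2000, 0x10000));
            return 0;
    }

The probe also gains a common out: label so a failure after either ioremap unwinds both mappings, and device registration moves after the hyp mappings, removing the old unregister-and-iounmap error path.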
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index e83b7fe4baae..2893d5ba523a 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #define DEBUG_SPINLOCK_BUG_ON(p) | 29 | #define DEBUG_SPINLOCK_BUG_ON(p) |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | struct vgic_global __section(.hyp.text) kvm_vgic_global_state; | 32 | struct vgic_global __section(.hyp.text) kvm_vgic_global_state = {.gicv3_cpuif = STATIC_KEY_FALSE_INIT,}; |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * Locking order is always: | 35 | * Locking order is always: |
@@ -645,6 +645,9 @@ next: | |||
645 | /* Sync back the hardware VGIC state into our emulation after a guest's run. */ | 645 | /* Sync back the hardware VGIC state into our emulation after a guest's run. */ |
646 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | 646 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) |
647 | { | 647 | { |
648 | if (unlikely(!vgic_initialized(vcpu->kvm))) | ||
649 | return; | ||
650 | |||
648 | vgic_process_maintenance_interrupt(vcpu); | 651 | vgic_process_maintenance_interrupt(vcpu); |
649 | vgic_fold_lr_state(vcpu); | 652 | vgic_fold_lr_state(vcpu); |
650 | vgic_prune_ap_list(vcpu); | 653 | vgic_prune_ap_list(vcpu); |
@@ -653,6 +656,9 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | |||
653 | /* Flush our emulation state into the GIC hardware before entering the guest. */ | 656 | /* Flush our emulation state into the GIC hardware before entering the guest. */ |
654 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | 657 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) |
655 | { | 658 | { |
659 | if (unlikely(!vgic_initialized(vcpu->kvm))) | ||
660 | return; | ||
661 | |||
656 | spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); | 662 | spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); |
657 | vgic_flush_lr_state(vcpu); | 663 | vgic_flush_lr_state(vcpu); |
658 | spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); | 664 | spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); |
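Besides initializing the new static key in place (a struct member cannot use DEFINE_STATIC_KEY_FALSE, hence STATIC_KEY_FALSE_INIT in the initializer), this file adds early returns to the flush and sync entry points. Both are invoked unconditionally around every guest entry and exit, so without the guards a VM that never created an in-kernel VGIC, i.e. one using a userspace irqchip, would walk uninitialized vgic state. A hedged sketch of the call site's shape (names simplified):

    /*
     * Hedged sketch: the arch run loop calls these hooks for every vcpu
     * entry/exit, whether or not an in-kernel irqchip exists, so the
     * hooks themselves must tolerate an absent or uninitialized VGIC.
     */
    static int run_once(struct kvm_vcpu *vcpu)
    {
            kvm_vgic_flush_hwstate(vcpu);   /* no-op without a VGIC now */

            /* ... enter the guest, run, take an exit ... */

            kvm_vgic_sync_hwstate(vcpu);    /* ditto */
            return 0;
    }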
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 6c4625c46368..9d9e014765a2 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h | |||
@@ -72,7 +72,6 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq) | |||
72 | kref_get(&irq->refcount); | 72 | kref_get(&irq->refcount); |
73 | } | 73 | } |
74 | 74 | ||
75 | #ifdef CONFIG_KVM_ARM_VGIC_V3 | ||
76 | void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu); | 75 | void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu); |
77 | void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); | 76 | void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); |
78 | void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); | 77 | void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); |
@@ -84,63 +83,14 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu); | |||
84 | int vgic_v3_probe(const struct gic_kvm_info *info); | 83 | int vgic_v3_probe(const struct gic_kvm_info *info); |
85 | int vgic_v3_map_resources(struct kvm *kvm); | 84 | int vgic_v3_map_resources(struct kvm *kvm); |
86 | int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address); | 85 | int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address); |
86 | |||
87 | #ifdef CONFIG_KVM_ARM_VGIC_V3_ITS | ||
87 | int vgic_register_its_iodevs(struct kvm *kvm); | 88 | int vgic_register_its_iodevs(struct kvm *kvm); |
88 | bool vgic_has_its(struct kvm *kvm); | 89 | bool vgic_has_its(struct kvm *kvm); |
89 | int kvm_vgic_register_its_device(void); | 90 | int kvm_vgic_register_its_device(void); |
90 | void vgic_enable_lpis(struct kvm_vcpu *vcpu); | 91 | void vgic_enable_lpis(struct kvm_vcpu *vcpu); |
91 | int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi); | 92 | int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi); |
92 | #else | 93 | #else |
93 | static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu) | ||
94 | { | ||
95 | } | ||
96 | |||
97 | static inline void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | ||
98 | { | ||
99 | } | ||
100 | |||
101 | static inline void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, | ||
102 | struct vgic_irq *irq, int lr) | ||
103 | { | ||
104 | } | ||
105 | |||
106 | static inline void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr) | ||
107 | { | ||
108 | } | ||
109 | |||
110 | static inline void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) | ||
111 | { | ||
112 | } | ||
113 | |||
114 | static inline | ||
115 | void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) | ||
116 | { | ||
117 | } | ||
118 | |||
119 | static inline | ||
120 | void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) | ||
121 | { | ||
122 | } | ||
123 | |||
124 | static inline void vgic_v3_enable(struct kvm_vcpu *vcpu) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static inline int vgic_v3_probe(const struct gic_kvm_info *info) | ||
129 | { | ||
130 | return -ENODEV; | ||
131 | } | ||
132 | |||
133 | static inline int vgic_v3_map_resources(struct kvm *kvm) | ||
134 | { | ||
135 | return -ENODEV; | ||
136 | } | ||
137 | |||
138 | static inline int vgic_register_redist_iodevs(struct kvm *kvm, | ||
139 | gpa_t dist_base_address) | ||
140 | { | ||
141 | return -ENODEV; | ||
142 | } | ||
143 | |||
144 | static inline int vgic_register_its_iodevs(struct kvm *kvm) | 94 | static inline int vgic_register_its_iodevs(struct kvm *kvm) |
145 | { | 95 | { |
146 | return -ENODEV; | 96 | return -ENODEV; |