author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-16 12:55:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-16 12:55:35 -0400
commit     10dc3747661bea9215417b659449bb7b8ed3df2c (patch)
tree       d943974b4941203a7db2fabe4896852cf0f16bc4 /arch/arm64/kvm
parent     047486d8e7c2a7e8d75b068b69cb67b47364f5d4 (diff)
parent     f958ee745f70b60d0e41927cab2c073104bc70c2 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
"One of the largest releases for KVM... Hardly any generic
changes, but lots of architecture-specific updates.
ARM:
 - VHE support so that we can run the kernel at EL2 on ARMv8.1 systems
 - PMU support for guests
 - 32bit world switch rewritten in C
 - various optimizations to the vgic save/restore code
PPC:
 - enabled KVM-VFIO integration ("VFIO device")
 - optimizations to speed up IPIs between vcpus
 - in-kernel handling of IOMMU hypercalls
 - support for dynamic DMA windows (DDW)
s390:
 - provide the floating point registers via sync regs
 - separated instruction vs. data accesses
 - dirty log improvements for huge guests
 - bugfixes and documentation improvements
x86:
- Hyper-V VMBus hypercall userspace exit
- alternative implementation of lowest-priority interrupts using
vector hashing (for better VT-d posted interrupt support)
- fixed guest debugging with nested virtualizations
- improved interrupt tracking in the in-kernel IOAPIC
 - generic infrastructure for tracking writes to guest
   memory - currently its only use is to speed up the legacy shadow
   paging (pre-EPT) case, but in the future it will be used for
   virtual GPUs as well
 - much cleanup (LAPIC, kvmclock, MMU, PIT), including ubsan fixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (217 commits)
KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch
KVM: x86: disable MPX if host did not enable MPX XSAVE features
arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit
arm64: KVM: vgic-v3: Reset LRs at boot time
arm64: KVM: vgic-v3: Do not save an LR known to be empty
arm64: KVM: vgic-v3: Save maintenance interrupt state only if required
arm64: KVM: vgic-v3: Avoid accessing ICH registers
KVM: arm/arm64: vgic-v2: Make GICD_SGIR quicker to hit
KVM: arm/arm64: vgic-v2: Only wipe LRs on vcpu exit
KVM: arm/arm64: vgic-v2: Reset LRs at boot time
KVM: arm/arm64: vgic-v2: Do not save an LR known to be empty
KVM: arm/arm64: vgic-v2: Move GICH_ELRSR saving to its own function
KVM: arm/arm64: vgic-v2: Save maintenance interrupt state only if required
KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
KVM: s390: allocate only one DMA page per VM
KVM: s390: enable STFLE interpretation only if enabled for the guest
KVM: s390: wake up when the VCPU cpu timer expires
KVM: s390: step the VCPU timer while in enabled wait
KVM: s390: protect VCPU cpu timer with a seqcount
KVM: s390: step VCPU cpu timer during kvm_run ioctl
...
Diffstat (limited to 'arch/arm64/kvm')
 arch/arm64/kvm/Kconfig          |   7
 arch/arm64/kvm/Makefile         |   1
 arch/arm64/kvm/guest.c          |  51
 arch/arm64/kvm/hyp-init.S       |  15
 arch/arm64/kvm/hyp.S            |   7
 arch/arm64/kvm/hyp/Makefile     |   8
 arch/arm64/kvm/hyp/debug-sr.c   |   4
 arch/arm64/kvm/hyp/entry.S      |   6
 arch/arm64/kvm/hyp/hyp-entry.S  | 109
 arch/arm64/kvm/hyp/hyp.h        |  90
 arch/arm64/kvm/hyp/s2-setup.c   |  43
 arch/arm64/kvm/hyp/switch.c     | 206
 arch/arm64/kvm/hyp/sysreg-sr.c  | 149
 arch/arm64/kvm/hyp/timer-sr.c   |  71
 arch/arm64/kvm/hyp/tlb.c        |   2
 arch/arm64/kvm/hyp/vgic-v2-sr.c |  84
 arch/arm64/kvm/hyp/vgic-v3-sr.c | 341
 arch/arm64/kvm/reset.c          |   7
 arch/arm64/kvm/sys_regs.c       | 609
19 files changed, 1238 insertions(+), 572 deletions(-)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a5272c07d1cb..de7450df7629 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM
 	select HAVE_KVM_EVENTFD
 	select HAVE_KVM_IRQFD
 	select KVM_ARM_VGIC_V3
+	select KVM_ARM_PMU if HW_PERF_EVENTS
 	---help---
 	  Support hosting virtualized guest machines.
 	  We don't support KVM with 16K page tables yet, due to the multiple
@@ -48,6 +49,12 @@ config KVM_ARM_HOST
 	---help---
 	  Provides host support for ARM processors.
 
+config KVM_ARM_PMU
+	bool
+	---help---
+	  Adds support for a virtual Performance Monitoring Unit (PMU) in
+	  virtual machines.
+
 source drivers/vhost/Kconfig
 
 endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index caee9ee8e12a..122cff482ac4 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -26,3 +26,4 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
+kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 9e54ad7c240a..32fad75bb9ff 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -380,3 +380,54 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	}
 	return 0;
 }
+
+int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr)
+{
+	int ret;
+
+	switch (attr->group) {
+	case KVM_ARM_VCPU_PMU_V3_CTRL:
+		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
+
+int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr)
+{
+	int ret;
+
+	switch (attr->group) {
+	case KVM_ARM_VCPU_PMU_V3_CTRL:
+		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
+
+int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr)
+{
+	int ret;
+
+	switch (attr->group) {
+	case KVM_ARM_VCPU_PMU_V3_CTRL:
+		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
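
The three kvm_arm_vcpu_arch_*_attr() handlers above are the plumbing that routes per-vcpu device-attribute ioctls to the new PMU code. As a rough sketch of how userspace is expected to drive this interface (hedged: fd setup and error handling are elided, and while the KVM_ARM_VCPU_PMU_V3_IRQ/KVM_ARM_VCPU_PMU_V3_INIT attributes come from the same series, treat the exact sequence as illustrative rather than authoritative):

    /* Sketch: wire a PPI to the vPMU, then initialize it. */
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int init_vpmu(int vcpu_fd, int ppi)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                    .attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
                    .addr  = (__u64)(unsigned long)&ppi,
            };

            /* Ends up in kvm_arm_vcpu_arch_set_attr() above. */
            if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
                    return -1;

            attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
            attr.addr = 0;
            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }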
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index d073b5a216f7..7d8747c6427c 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -87,26 +87,13 @@ __do_hyp_init:
 #endif
 	/*
 	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
-	 * TCR_EL2 and VTCR_EL2.
+	 * TCR_EL2.
 	 */
 	mrs	x5, ID_AA64MMFR0_EL1
 	bfi	x4, x5, #16, #3
 
 	msr	tcr_el2, x4
 
-	ldr	x4, =VTCR_EL2_FLAGS
-	bfi	x4, x5, #16, #3
-	/*
-	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
-	 * VTCR_EL2.
-	 */
-	mrs	x5, ID_AA64MMFR1_EL1
-	ubfx	x5, x5, #5, #1
-	lsl	x5, x5, #VTCR_EL2_VS
-	orr	x4, x4, x5
-
-	msr	vtcr_el2, x4
-
 	mrs	x4, mair_el1
 	msr	mair_el2, x4
 	isb
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 0ccdcbbef3c2..0689a74e6ba0 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,7 +17,9 @@
 
 #include <linux/linkage.h>
 
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
 
 /*
  * u64 kvm_call_hyp(void *hypfn, ...);
@@ -38,6 +40,11 @@
  * arch/arm64/kernel/hyp_stub.S.
  */
 ENTRY(kvm_call_hyp)
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	hvc	#0
 	ret
+alternative_else
+	b	__vhe_hyp_call
+	nop
+alternative_endif
 ENDPROC(kvm_call_hyp)
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 826032bc3945..b6a8fc5ad1af 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,9 +2,12 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
-obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
+KVM=../../../../virt/kvm
+
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
-obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
@@ -12,3 +15,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o
 obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index c9c1e97501a9..053cf8b057c1 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -19,9 +19,7 @@
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
 
 #define read_debug(r,n)		read_sysreg(r##n##_el1)
 #define write_debug(v,r,n)	write_sysreg(v, r##n##_el1)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fd0fbe9b7e6a..ce9e5e5f28cf 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -130,9 +130,15 @@ ENDPROC(__guest_exit)
 ENTRY(__fpsimd_guest_restore)
 	stp	x4, lr, [sp, #-16]!
 
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	x2, cptr_el2
 	bic	x2, x2, #CPTR_EL2_TFP
 	msr	cptr_el2, x2
+alternative_else
+	mrs	x2, cpacr_el1
+	orr	x2, x2, #CPACR_EL1_FPEN
+	msr	cpacr_el1, x2
+alternative_endif
 	isb
 
 	mrs	x3, tpidr_el2
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 93e8d983c0bd..3488894397ff 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -19,7 +19,6 @@
 
 #include <asm/alternative.h>
 #include <asm/assembler.h>
-#include <asm/asm-offsets.h>
 #include <asm/cpufeature.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
@@ -38,10 +37,42 @@
 	ldp	x0, x1, [sp], #16
 .endm
 
+.macro do_el2_call
+	/*
+	 * Shuffle the parameters before calling the function
+	 * pointed to in x0. Assumes parameters in x[1,2,3].
+	 */
+	sub	sp, sp, #16
+	str	lr, [sp]
+	mov	lr, x0
+	mov	x0, x1
+	mov	x1, x2
+	mov	x2, x3
+	blr	lr
+	ldr	lr, [sp]
+	add	sp, sp, #16
+.endm
+
+ENTRY(__vhe_hyp_call)
+	do_el2_call
+	/*
+	 * We used to rely on having an exception return to get
+	 * an implicit isb. In the E2H case, we don't have it anymore.
+	 * rather than changing all the leaf functions, just do it here
+	 * before returning to the rest of the kernel.
+	 */
+	isb
+	ret
+ENDPROC(__vhe_hyp_call)
+
 el1_sync:				// Guest trapped into EL2
 	save_x0_to_x3
 
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	x1, esr_el2
+alternative_else
+	mrs	x1, esr_el1
+alternative_endif
 	lsr	x2, x1, #ESR_ELx_EC_SHIFT
 
 	cmp	x2, #ESR_ELx_EC_HVC64
@@ -58,19 +89,13 @@ el1_sync:				// Guest trapped into EL2
 	mrs	x0, vbar_el2
 	b	2f
 
-1:	stp	lr, xzr, [sp, #-16]!
-
+1:
 	/*
-	 * Compute the function address in EL2, and shuffle the parameters.
+	 * Perform the EL2 call
 	 */
 	kern_hyp_va	x0
-	mov	lr, x0
-	mov	x0, x1
-	mov	x1, x2
-	mov	x2, x3
-	blr	lr
+	do_el2_call
 
-	ldp	lr, xzr, [sp], #16
 2:	eret
 
 el1_trap:
@@ -83,72 +108,10 @@ el1_trap:
 	cmp	x2, #ESR_ELx_EC_FP_ASIMD
 	b.eq	__fpsimd_guest_restore
 
-	cmp	x2, #ESR_ELx_EC_DABT_LOW
-	mov	x0, #ESR_ELx_EC_IABT_LOW
-	ccmp	x2, x0, #4, ne
-	b.ne	1f		// Not an abort we care about
-
-	/* This is an abort. Check for permission fault */
-alternative_if_not ARM64_WORKAROUND_834220
-	and	x2, x1, #ESR_ELx_FSC_TYPE
-	cmp	x2, #FSC_PERM
-	b.ne	1f		// Not a permission fault
-alternative_else
-	nop			// Use the permission fault path to
-	nop			// check for a valid S1 translation,
-	nop			// regardless of the ESR value.
-alternative_endif
-
-	/*
-	 * Check for Stage-1 page table walk, which is guaranteed
-	 * to give a valid HPFAR_EL2.
-	 */
-	tbnz	x1, #7, 1f	// S1PTW is set
-
-	/* Preserve PAR_EL1 */
-	mrs	x3, par_el1
-	stp	x3, xzr, [sp, #-16]!
-
-	/*
-	 * Permission fault, HPFAR_EL2 is invalid.
-	 * Resolve the IPA the hard way using the guest VA.
-	 * Stage-1 translation already validated the memory access rights.
-	 * As such, we can use the EL1 translation regime, and don't have
-	 * to distinguish between EL0 and EL1 access.
-	 */
-	mrs	x2, far_el2
-	at	s1e1r, x2
-	isb
-
-	/* Read result */
-	mrs	x3, par_el1
-	ldp	x0, xzr, [sp], #16	// Restore PAR_EL1 from the stack
-	msr	par_el1, x0
-	tbnz	x3, #0, 3f		// Bail out if we failed the translation
-	ubfx	x3, x3, #12, #36	// Extract IPA
-	lsl	x3, x3, #4		// and present it like HPFAR
-	b	2f
-
-1:	mrs	x3, hpfar_el2
-	mrs	x2, far_el2
-
-2:	mrs	x0, tpidr_el2
-	str	w1, [x0, #VCPU_ESR_EL2]
-	str	x2, [x0, #VCPU_FAR_EL2]
-	str	x3, [x0, #VCPU_HPFAR_EL2]
-
+	mrs	x0, tpidr_el2
 	mov	x1, #ARM_EXCEPTION_TRAP
 	b	__guest_exit
 
-	/*
-	 * Translation failed. Just return to the guest and
-	 * let it fault again. Another CPU is probably playing
-	 * behind our back.
-	 */
-3:	restore_x0_to_x3
-
-	eret
-
 el1_irq:
 	save_x0_to_x3
 	mrs	x0, tpidr_el2
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
deleted file mode 100644
index fb275178b6af..000000000000
--- a/arch/arm64/kvm/hyp/hyp.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_KVM_HYP_H__
-#define __ARM64_KVM_HYP_H__
-
-#include <linux/compiler.h>
-#include <linux/kvm_host.h>
-#include <asm/kvm_mmu.h>
-#include <asm/sysreg.h>
-
-#define __hyp_text __section(.hyp.text) notrace
-
-#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
-#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
-						      + PAGE_OFFSET)
-
-/**
- * hyp_alternate_select - Generates patchable code sequences that are
- * used to switch between two implementations of a function, depending
- * on the availability of a feature.
- *
- * @fname: a symbol name that will be defined as a function returning a
- * function pointer whose type will match @orig and @alt
- * @orig: A pointer to the default function, as returned by @fname when
- * @cond doesn't hold
- * @alt: A pointer to the alternate function, as returned by @fname
- * when @cond holds
- * @cond: a CPU feature (as described in asm/cpufeature.h)
- */
-#define hyp_alternate_select(fname, orig, alt, cond)			\
-typeof(orig) * __hyp_text fname(void)					\
-{									\
-	typeof(alt) *val = orig;					\
-	asm volatile(ALTERNATIVE("nop		\n",			\
-				 "mov	%0, %1	\n",			\
-				 cond)					\
-		     : "+r" (val) : "r" (alt));				\
-	return val;							\
-}
-
-void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
-
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-
-void __timer_save_state(struct kvm_vcpu *vcpu);
-void __timer_restore_state(struct kvm_vcpu *vcpu);
-
-void __sysreg_save_state(struct kvm_cpu_context *ctxt);
-void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
-void __sysreg32_save_state(struct kvm_vcpu *vcpu);
-void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
-
-void __debug_save_state(struct kvm_vcpu *vcpu,
-			struct kvm_guest_debug_arch *dbg,
-			struct kvm_cpu_context *ctxt);
-void __debug_restore_state(struct kvm_vcpu *vcpu,
-			   struct kvm_guest_debug_arch *dbg,
-			   struct kvm_cpu_context *ctxt);
-void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
-void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
-
-void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
-void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-static inline bool __fpsimd_enabled(void)
-{
-	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
-}
-
-u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
-void __noreturn __hyp_do_panic(unsigned long, ...);
-
-#endif /* __ARM64_KVM_HYP_H__ */
-
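
The hyp_alternate_select() macro documented in the header removed above does not go away; it moves to asm/kvm_hyp.h and is what the new switch.c and sysreg-sr.c code below builds on. A minimal usage sketch (the function names here are hypothetical, but the pattern mirrors the real uses further down):

    static void __hyp_text __foo_nvhe(void) { /* non-VHE flavour */ }
    static void __hyp_text __foo_vhe(void)  { /* VHE flavour */ }

    /* Defines __foo_arch(), which returns a pointer to one of the two
     * flavours; the choice is patched in once at boot by the
     * alternatives framework rather than tested on every call. */
    static hyp_alternate_select(__foo_arch, __foo_nvhe, __foo_vhe,
                                ARM64_HAS_VIRT_HOST_EXTN);

    static void __hyp_text foo(void)
    {
            __foo_arch()();         /* note the double call */
    }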
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
new file mode 100644
index 000000000000..bfc54fd82797
--- /dev/null
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+void __hyp_text __init_stage2_translation(void)
+{
+	u64 val = VTCR_EL2_FLAGS;
+	u64 tmp;
+
+	/*
+	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS
+	 * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
+	 * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
+	 */
+	val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+
+	/*
+	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
+	 * bit in VTCR_EL2.
+	 */
+	tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
+	val |= (tmp == 2) ? VTCR_EL2_VS : 0;
+
+	write_sysreg(val, vtcr_el2);
+}
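
To make the bit arithmetic in __init_stage2_translation() concrete, a hedged worked example with made-up ID register values: on a CPU reporting ID_AA64MMFR0_EL1.PARange = 0b0101 (48-bit PA) and ID_AA64MMFR1_EL1.VMIDBits = 0b0010 (16-bit VMIDs supported), the function effectively computes:

    u64 val = VTCR_EL2_FLAGS;

    val |= (0x5 & 7) << 16;     /* VTCR_EL2.PS <- 0b101 (48-bit PA)   */
    val |= VTCR_EL2_VS;         /* tmp == 2, so enable 16-bit VMIDs   */

    write_sysreg(val, vtcr_el2);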
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index f0e7bdfae134..437cfad5e3d8 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -15,7 +15,53 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "hyp.h"
+#include <linux/types.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+static bool __hyp_text __fpsimd_enabled_nvhe(void)
+{
+	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
+}
+
+static bool __hyp_text __fpsimd_enabled_vhe(void)
+{
+	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
+}
+
+static hyp_alternate_select(__fpsimd_is_enabled,
+			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+bool __hyp_text __fpsimd_enabled(void)
+{
+	return __fpsimd_is_enabled()();
+}
+
+static void __hyp_text __activate_traps_vhe(void)
+{
+	u64 val;
+
+	val = read_sysreg(cpacr_el1);
+	val |= CPACR_EL1_TTA;
+	val &= ~CPACR_EL1_FPEN;
+	write_sysreg(val, cpacr_el1);
+
+	write_sysreg(__kvm_hyp_vector, vbar_el1);
+}
+
+static void __hyp_text __activate_traps_nvhe(void)
+{
+	u64 val;
+
+	val = CPTR_EL2_DEFAULT;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+	write_sysreg(val, cptr_el2);
+}
+
+static hyp_alternate_select(__activate_traps_arch,
+			    __activate_traps_nvhe, __activate_traps_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
 
 static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
@@ -36,20 +82,37 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(val, hcr_el2);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
+	/* Make sure we trap PMU access from EL0 to EL2 */
+	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+	__activate_traps_arch()();
+}
 
-	val = CPTR_EL2_DEFAULT;
-	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
-	write_sysreg(val, cptr_el2);
+static void __hyp_text __deactivate_traps_vhe(void)
+{
+	extern char vectors[];	/* kernel exception vectors */
 
-	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
+	write_sysreg(vectors, vbar_el1);
 }
 
-static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+static void __hyp_text __deactivate_traps_nvhe(void)
 {
 	write_sysreg(HCR_RW, hcr_el2);
+	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+}
+
+static hyp_alternate_select(__deactivate_traps_arch,
+			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+{
+	__deactivate_traps_arch()();
 	write_sysreg(0, hstr_el2);
 	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
-	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+	write_sysreg(0, pmuserenr_el0);
 }
 
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
@@ -89,6 +152,86 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 	__vgic_call_restore_state()(vcpu);
 }
 
+static bool __hyp_text __true_value(void)
+{
+	return true;
+}
+
+static bool __hyp_text __false_value(void)
+{
+	return false;
+}
+
+static hyp_alternate_select(__check_arm_834220,
+			    __false_value, __true_value,
+			    ARM64_WORKAROUND_834220);
+
+static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
+{
+	u64 par, tmp;
+
+	/*
+	 * Resolve the IPA the hard way using the guest VA.
+	 *
+	 * Stage-1 translation already validated the memory access
+	 * rights. As such, we can use the EL1 translation regime, and
+	 * don't have to distinguish between EL0 and EL1 access.
+	 *
+	 * We do need to save/restore PAR_EL1 though, as we haven't
+	 * saved the guest context yet, and we may return early...
+	 */
+	par = read_sysreg(par_el1);
+	asm volatile("at s1e1r, %0" : : "r" (far));
+	isb();
+
+	tmp = read_sysreg(par_el1);
+	write_sysreg(par, par_el1);
+
+	if (unlikely(tmp & 1))
+		return false; /* Translation failed, back to guest */
+
+	/* Convert PAR to HPFAR format */
+	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
+	return true;
+}
+
+static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
+{
+	u64 esr = read_sysreg_el2(esr);
+	u8 ec = esr >> ESR_ELx_EC_SHIFT;
+	u64 hpfar, far;
+
+	vcpu->arch.fault.esr_el2 = esr;
+
+	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
+		return true;
+
+	far = read_sysreg_el2(far);
+
+	/*
+	 * The HPFAR can be invalid if the stage 2 fault did not
+	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
+	 * bit is clear) and one of the two following cases are true:
+	 *   1. The fault was due to a permission fault
+	 *   2. The processor carries errata 834220
+	 *
+	 * Therefore, for all non S1PTW faults where we either have a
+	 * permission fault or the errata workaround is enabled, we
+	 * resolve the IPA using the AT instruction.
+	 */
+	if (!(esr & ESR_ELx_S1PTW) &&
+	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+		if (!__translate_far_to_hpfar(far, &hpfar))
+			return false;
+	} else {
+		hpfar = read_sysreg(hpfar_el2);
+	}
+
+	vcpu->arch.fault.far_el2 = far;
+	vcpu->arch.fault.hpfar_el2 = hpfar;
+	return true;
+}
+
 static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
@@ -102,7 +245,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 	guest_ctxt = &vcpu->arch.ctxt;
 
-	__sysreg_save_state(host_ctxt);
+	__sysreg_save_host_state(host_ctxt);
 	__debug_cond_save_host_state(vcpu);
 
 	__activate_traps(vcpu);
@@ -116,16 +259,20 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 	 * to Cortex-A57 erratum #852523.
 	 */
 	__sysreg32_restore_state(vcpu);
-	__sysreg_restore_state(guest_ctxt);
+	__sysreg_restore_guest_state(guest_ctxt);
 	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
 
 	/* Jump in the fire! */
+again:
 	exit_code = __guest_enter(vcpu, host_ctxt);
 	/* And we're baaack! */
 
+	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+		goto again;
+
 	fp_enabled = __fpsimd_enabled();
 
-	__sysreg_save_state(guest_ctxt);
+	__sysreg_save_guest_state(guest_ctxt);
 	__sysreg32_save_state(vcpu);
 	__timer_save_state(vcpu);
 	__vgic_save_state(vcpu);
@@ -133,7 +280,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 	__deactivate_traps(vcpu);
 	__deactivate_vm(vcpu);
 
-	__sysreg_restore_state(host_ctxt);
+	__sysreg_restore_host_state(host_ctxt);
 
 	if (fp_enabled) {
 		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
@@ -150,11 +297,34 @@ __alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
 
-void __hyp_text __noreturn __hyp_panic(void)
+static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
 {
 	unsigned long str_va = (unsigned long)__hyp_panic_string;
-	u64 spsr = read_sysreg(spsr_el2);
-	u64 elr = read_sysreg(elr_el2);
+
+	__hyp_do_panic(hyp_kern_va(str_va),
+		       spsr, elr,
+		       read_sysreg(esr_el2), read_sysreg_el2(far),
+		       read_sysreg(hpfar_el2), par,
+		       (void *)read_sysreg(tpidr_el2));
+}
+
+static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
+{
+	panic(__hyp_panic_string,
+	      spsr, elr,
+	      read_sysreg_el2(esr), read_sysreg_el2(far),
+	      read_sysreg(hpfar_el2), par,
+	      (void *)read_sysreg(tpidr_el2));
+}
+
+static hyp_alternate_select(__hyp_call_panic,
+			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+void __hyp_text __noreturn __hyp_panic(void)
+{
+	u64 spsr = read_sysreg_el2(spsr);
+	u64 elr = read_sysreg_el2(elr);
 	u64 par = read_sysreg(par_el1);
 
 	if (read_sysreg(vttbr_el2)) {
@@ -165,15 +335,11 @@ void __hyp_text __noreturn __hyp_panic(void)
 		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 		__deactivate_traps(vcpu);
 		__deactivate_vm(vcpu);
-		__sysreg_restore_state(host_ctxt);
+		__sysreg_restore_host_state(host_ctxt);
 	}
 
 	/* Call panic for real */
-	__hyp_do_panic(hyp_kern_va(str_va),
-		       spsr, elr,
-		       read_sysreg(esr_el2), read_sysreg(far_el2),
-		       read_sysreg(hpfar_el2), par,
-		       (void *)read_sysreg(tpidr_el2));
+	__hyp_call_panic()(spsr, elr, par);
 
 	unreachable();
 }
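
The PAR-to-HPFAR conversion in __translate_far_to_hpfar() above is pure bit surgery: PAR_EL1 carries the translated physical address in bits [47:12], and HPFAR_EL2 wants those same bits presented as the faulting IPA starting at bit 4. A hedged arithmetic sketch with a made-up translation result:

    u64 par   = 0x0000012345678000UL;            /* hypothetical PAR_EL1, F bit clear */
    u64 pfn   = (par >> 12) & ((1UL << 36) - 1); /* extract PA[47:12]                 */
    u64 hpfar = pfn << 4;                        /* 0x123456780 -> HPFAR_EL2.FIPA     */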
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 425630980229..0f7c40eb3f53 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -19,75 +19,122 @@
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
+#include <asm/kvm_hyp.h>
 
-#include "hyp.h"
+/* Yes, this does nothing, on purpose */
+static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
 
-/* ctxt is already in the HYP VA space */
-void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+/*
+ * Non-VHE: Both host and guest must save everything.
+ *
+ * VHE: Host must save tpidr*_el[01], actlr_el1, sp0, pc, pstate, and
+ * guest must save everything.
+ */
+
+static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->sys_regs[MPIDR_EL1]	= read_sysreg(vmpidr_el2);
-	ctxt->sys_regs[CSSELR_EL1]	= read_sysreg(csselr_el1);
-	ctxt->sys_regs[SCTLR_EL1]	= read_sysreg(sctlr_el1);
 	ctxt->sys_regs[ACTLR_EL1]	= read_sysreg(actlr_el1);
-	ctxt->sys_regs[CPACR_EL1]	= read_sysreg(cpacr_el1);
-	ctxt->sys_regs[TTBR0_EL1]	= read_sysreg(ttbr0_el1);
-	ctxt->sys_regs[TTBR1_EL1]	= read_sysreg(ttbr1_el1);
-	ctxt->sys_regs[TCR_EL1]		= read_sysreg(tcr_el1);
-	ctxt->sys_regs[ESR_EL1]		= read_sysreg(esr_el1);
-	ctxt->sys_regs[AFSR0_EL1]	= read_sysreg(afsr0_el1);
-	ctxt->sys_regs[AFSR1_EL1]	= read_sysreg(afsr1_el1);
-	ctxt->sys_regs[FAR_EL1]		= read_sysreg(far_el1);
-	ctxt->sys_regs[MAIR_EL1]	= read_sysreg(mair_el1);
-	ctxt->sys_regs[VBAR_EL1]	= read_sysreg(vbar_el1);
-	ctxt->sys_regs[CONTEXTIDR_EL1]	= read_sysreg(contextidr_el1);
 	ctxt->sys_regs[TPIDR_EL0]	= read_sysreg(tpidr_el0);
 	ctxt->sys_regs[TPIDRRO_EL0]	= read_sysreg(tpidrro_el0);
 	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);
-	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg(amair_el1);
-	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg(cntkctl_el1);
+	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
+	ctxt->gp_regs.regs.pc		= read_sysreg_el2(elr);
+	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(spsr);
+}
+
+static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt->sys_regs[MPIDR_EL1]	= read_sysreg(vmpidr_el2);
+	ctxt->sys_regs[CSSELR_EL1]	= read_sysreg(csselr_el1);
+	ctxt->sys_regs[SCTLR_EL1]	= read_sysreg_el1(sctlr);
+	ctxt->sys_regs[CPACR_EL1]	= read_sysreg_el1(cpacr);
+	ctxt->sys_regs[TTBR0_EL1]	= read_sysreg_el1(ttbr0);
+	ctxt->sys_regs[TTBR1_EL1]	= read_sysreg_el1(ttbr1);
+	ctxt->sys_regs[TCR_EL1]		= read_sysreg_el1(tcr);
+	ctxt->sys_regs[ESR_EL1]		= read_sysreg_el1(esr);
+	ctxt->sys_regs[AFSR0_EL1]	= read_sysreg_el1(afsr0);
+	ctxt->sys_regs[AFSR1_EL1]	= read_sysreg_el1(afsr1);
+	ctxt->sys_regs[FAR_EL1]		= read_sysreg_el1(far);
+	ctxt->sys_regs[MAIR_EL1]	= read_sysreg_el1(mair);
+	ctxt->sys_regs[VBAR_EL1]	= read_sysreg_el1(vbar);
+	ctxt->sys_regs[CONTEXTIDR_EL1]	= read_sysreg_el1(contextidr);
+	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg_el1(amair);
+	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg_el1(cntkctl);
 	ctxt->sys_regs[PAR_EL1]		= read_sysreg(par_el1);
 	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
 
-	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
-	ctxt->gp_regs.regs.pc		= read_sysreg(elr_el2);
-	ctxt->gp_regs.regs.pstate	= read_sysreg(spsr_el2);
 	ctxt->gp_regs.sp_el1		= read_sysreg(sp_el1);
-	ctxt->gp_regs.elr_el1		= read_sysreg(elr_el1);
-	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1);
+	ctxt->gp_regs.elr_el1		= read_sysreg_el1(elr);
+	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
+}
+
+static hyp_alternate_select(__sysreg_call_save_host_state,
+			    __sysreg_save_state, __sysreg_do_nothing,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_call_save_host_state()(ctxt);
+	__sysreg_save_common_state(ctxt);
+}
+
+void __hyp_text __sysreg_save_guest_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_save_state(ctxt);
+	__sysreg_save_common_state(ctxt);
 }
 
-void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
-	write_sysreg(ctxt->sys_regs[MPIDR_EL1],	  vmpidr_el2);
-	write_sysreg(ctxt->sys_regs[CSSELR_EL1],  csselr_el1);
-	write_sysreg(ctxt->sys_regs[SCTLR_EL1],	  sctlr_el1);
 	write_sysreg(ctxt->sys_regs[ACTLR_EL1],	  actlr_el1);
-	write_sysreg(ctxt->sys_regs[CPACR_EL1],	  cpacr_el1);
-	write_sysreg(ctxt->sys_regs[TTBR0_EL1],	  ttbr0_el1);
-	write_sysreg(ctxt->sys_regs[TTBR1_EL1],	  ttbr1_el1);
-	write_sysreg(ctxt->sys_regs[TCR_EL1],	  tcr_el1);
-	write_sysreg(ctxt->sys_regs[ESR_EL1],	  esr_el1);
-	write_sysreg(ctxt->sys_regs[AFSR0_EL1],	  afsr0_el1);
-	write_sysreg(ctxt->sys_regs[AFSR1_EL1],	  afsr1_el1);
-	write_sysreg(ctxt->sys_regs[FAR_EL1],	  far_el1);
-	write_sysreg(ctxt->sys_regs[MAIR_EL1],	  mair_el1);
-	write_sysreg(ctxt->sys_regs[VBAR_EL1],	  vbar_el1);
-	write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL0],	  tpidr_el0);
 	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL1],	  tpidr_el1);
-	write_sysreg(ctxt->sys_regs[AMAIR_EL1],	  amair_el1);
-	write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1);
-	write_sysreg(ctxt->sys_regs[PAR_EL1],	  par_el1);
-	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);
-
 	write_sysreg(ctxt->gp_regs.regs.sp,	  sp_el0);
-	write_sysreg(ctxt->gp_regs.regs.pc,	  elr_el2);
-	write_sysreg(ctxt->gp_regs.regs.pstate,	  spsr_el2);
+	write_sysreg_el2(ctxt->gp_regs.regs.pc,	  elr);
+	write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+}
+
+static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+	write_sysreg(ctxt->sys_regs[MPIDR_EL1],	  vmpidr_el2);
+	write_sysreg(ctxt->sys_regs[CSSELR_EL1],  csselr_el1);
+	write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr);
+	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr);
+	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0);
+	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1);
+	write_sysreg_el1(ctxt->sys_regs[TCR_EL1],   tcr);
+	write_sysreg_el1(ctxt->sys_regs[ESR_EL1],   esr);
+	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0);
+	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1);
+	write_sysreg_el1(ctxt->sys_regs[FAR_EL1],   far);
+	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1],  mair);
+	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1],  vbar);
+	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],contextidr);
+	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
+	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
+	write_sysreg(ctxt->sys_regs[PAR_EL1],	par_el1);
+	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	mdscr_el1);
+
 	write_sysreg(ctxt->gp_regs.sp_el1,	sp_el1);
-	write_sysreg(ctxt->gp_regs.elr_el1,	elr_el1);
-	write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
+	write_sysreg_el1(ctxt->gp_regs.elr_el1,	elr);
+	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
+}
+
+static hyp_alternate_select(__sysreg_call_restore_host_state,
+			    __sysreg_restore_state, __sysreg_do_nothing,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_call_restore_host_state()(ctxt);
+	__sysreg_restore_common_state(ctxt);
+}
+
+void __hyp_text __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_restore_state(ctxt);
+	__sysreg_restore_common_state(ctxt);
 }
 
 void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
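
The read_sysreg_el1()/write_sysreg_el1() accessors this file now uses live in asm/kvm_hyp.h. The idea (sketched here from the same series; treat the exact expansion as an assumption) is that under VHE the plain _EL1 register names executed at EL2 are redirected to the EL2 registers, so the accessors patch between the classic _EL1 encoding and the VHE _EL12 alias via the alternatives framework:

    #define read_sysreg_elx(r,nvh,vh)                                        \
        ({                                                                   \
                u64 reg;                                                     \
                asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),     \
                                         "mrs_s %0, " __stringify(r##vh),    \
                                         ARM64_HAS_VIRT_HOST_EXTN)           \
                             : "=r" (reg));                                  \
                reg;                                                         \
        })

    #define read_sysreg_el1(r)      read_sysreg_elx(r, _EL1, _EL12)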
diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c
deleted file mode 100644
index 1051e5d7320f..000000000000
--- a/arch/arm64/kvm/hyp/timer-sr.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2012-2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <clocksource/arm_arch_timer.h>
-#include <linux/compiler.h>
-#include <linux/kvm_host.h>
-
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
-
-/* vcpu is already in the HYP VA space */
-void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	u64 val;
-
-	if (kvm->arch.timer.enabled) {
-		timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
-		timer->cntv_cval = read_sysreg(cntv_cval_el0);
-	}
-
-	/* Disable the virtual timer */
-	write_sysreg(0, cntv_ctl_el0);
-
-	/* Allow physical timer/counter access for the host */
-	val = read_sysreg(cnthctl_el2);
-	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-	write_sysreg(val, cnthctl_el2);
-
-	/* Clear cntvoff for the host */
-	write_sysreg(0, cntvoff_el2);
-}
-
-void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	u64 val;
-
-	/*
-	 * Disallow physical timer access for the guest
-	 * Physical counter access is allowed
-	 */
-	val = read_sysreg(cnthctl_el2);
-	val &= ~CNTHCTL_EL1PCEN;
-	val |= CNTHCTL_EL1PCTEN;
-	write_sysreg(val, cnthctl_el2);
-
-	if (kvm->arch.timer.enabled) {
-		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
-		write_sysreg(timer->cntv_cval, cntv_cval_el0);
-		isb();
-		write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
-	}
-}
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 2a7e0d838698..be8177cdd3bf 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,7 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
 
 static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c
deleted file mode 100644
index e71761238cfc..000000000000
--- a/arch/arm64/kvm/hyp/vgic-v2-sr.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012-2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/compiler.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/kvm_host.h>
-
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
-
-/* vcpu is already in the HYP VA space */
-void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	struct vgic_dist *vgic = &kvm->arch.vgic;
-	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
-	u32 eisr0, eisr1, elrsr0, elrsr1;
-	int i, nr_lr;
-
-	if (!base)
-		return;
-
-	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
-	cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
-	cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
-	eisr0  = readl_relaxed(base + GICH_EISR0);
-	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
-	if (unlikely(nr_lr > 32)) {
-		eisr1  = readl_relaxed(base + GICH_EISR1);
-		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
-	} else {
-		eisr1 = elrsr1 = 0;
-	}
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_eisr  = ((u64)eisr0 << 32) | eisr1;
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
-	cpu_if->vgic_eisr  = ((u64)eisr1 << 32) | eisr0;
-	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
-	cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
-
-	writel_relaxed(0, base + GICH_HCR);
-
-	for (i = 0; i < nr_lr; i++)
-		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
-}
-
-/* vcpu is already in the HYP VA space */
-void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	struct vgic_dist *vgic = &kvm->arch.vgic;
-	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
-	int i, nr_lr;
-
-	if (!base)
-		return;
-
-	writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
-	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
-	writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
-
-	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
-	for (i = 0; i < nr_lr; i++)
-		writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
-}
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 5dd2a26444ec..fff7cd42b3a3 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c | |||
@@ -19,9 +19,7 @@ | |||
19 | #include <linux/irqchip/arm-gic-v3.h> | 19 | #include <linux/irqchip/arm-gic-v3.h> |
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | 21 | ||
22 | #include <asm/kvm_mmu.h> | 22 | #include <asm/kvm_hyp.h> |
23 | |||
24 | #include "hyp.h" | ||
25 | 23 | ||
26 | #define vtr_to_max_lr_idx(v) ((v) & 0xf) | 24 | #define vtr_to_max_lr_idx(v) ((v) & 0xf) |
27 | #define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) | 25 | #define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) |
@@ -39,12 +37,133 @@
 	asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
 } while (0)
 
-/* vcpu is already in the HYP VA space */
+static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
+{
+	switch (lr & 0xf) {
+	case 0:
+		return read_gicreg(ICH_LR0_EL2);
+	case 1:
+		return read_gicreg(ICH_LR1_EL2);
+	case 2:
+		return read_gicreg(ICH_LR2_EL2);
+	case 3:
+		return read_gicreg(ICH_LR3_EL2);
+	case 4:
+		return read_gicreg(ICH_LR4_EL2);
+	case 5:
+		return read_gicreg(ICH_LR5_EL2);
+	case 6:
+		return read_gicreg(ICH_LR6_EL2);
+	case 7:
+		return read_gicreg(ICH_LR7_EL2);
+	case 8:
+		return read_gicreg(ICH_LR8_EL2);
+	case 9:
+		return read_gicreg(ICH_LR9_EL2);
+	case 10:
+		return read_gicreg(ICH_LR10_EL2);
+	case 11:
+		return read_gicreg(ICH_LR11_EL2);
+	case 12:
+		return read_gicreg(ICH_LR12_EL2);
+	case 13:
+		return read_gicreg(ICH_LR13_EL2);
+	case 14:
+		return read_gicreg(ICH_LR14_EL2);
+	case 15:
+		return read_gicreg(ICH_LR15_EL2);
+	}
+
+	unreachable();
+}
+
+static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
+{
+	switch (lr & 0xf) {
+	case 0:
+		write_gicreg(val, ICH_LR0_EL2);
+		break;
+	case 1:
+		write_gicreg(val, ICH_LR1_EL2);
+		break;
+	case 2:
+		write_gicreg(val, ICH_LR2_EL2);
+		break;
+	case 3:
+		write_gicreg(val, ICH_LR3_EL2);
+		break;
+	case 4:
+		write_gicreg(val, ICH_LR4_EL2);
+		break;
+	case 5:
+		write_gicreg(val, ICH_LR5_EL2);
+		break;
+	case 6:
+		write_gicreg(val, ICH_LR6_EL2);
+		break;
+	case 7:
+		write_gicreg(val, ICH_LR7_EL2);
+		break;
+	case 8:
+		write_gicreg(val, ICH_LR8_EL2);
+		break;
+	case 9:
+		write_gicreg(val, ICH_LR9_EL2);
+		break;
+	case 10:
+		write_gicreg(val, ICH_LR10_EL2);
+		break;
+	case 11:
+		write_gicreg(val, ICH_LR11_EL2);
+		break;
+	case 12:
+		write_gicreg(val, ICH_LR12_EL2);
+		break;
+	case 13:
+		write_gicreg(val, ICH_LR13_EL2);
+		break;
+	case 14:
+		write_gicreg(val, ICH_LR14_EL2);
+		break;
+	case 15:
+		write_gicreg(val, ICH_LR15_EL2);
+		break;
+	}
+}
+
+static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
+{
+	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	int i;
+	bool expect_mi;
+
+	expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);
+
+	for (i = 0; i < nr_lr; i++) {
+		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+			continue;
+
+		expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
+			      (cpu_if->vgic_lr[i] & ICH_LR_EOI));
+	}
+
+	if (expect_mi) {
+		cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
+
+		if (cpu_if->vgic_misr & ICH_MISR_EOI)
+			cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
+		else
+			cpu_if->vgic_eisr = 0;
+	} else {
+		cpu_if->vgic_misr = 0;
+		cpu_if->vgic_eisr = 0;
+	}
+}
+
 void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 val;
-	u32 max_lr_idx, nr_pri_bits;
 
 	/*
 	 * Make sure stores to the GIC via the memory mapped interface
@@ -53,68 +172,66 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	dsb(st);
 
 	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
-	cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
-	cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
-	cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
 
-	write_gicreg(0, ICH_HCR_EL2);
-	val = read_gicreg(ICH_VTR_EL2);
-	max_lr_idx = vtr_to_max_lr_idx(val);
-	nr_pri_bits = vtr_to_nr_pri_bits(val);
+	if (vcpu->arch.vgic_cpu.live_lrs) {
+		int i;
+		u32 max_lr_idx, nr_pri_bits;
 
-	switch (max_lr_idx) {
-	case 15:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2);
-	case 14:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2);
-	case 13:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2);
-	case 12:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2);
-	case 11:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2);
-	case 10:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2);
-	case 9:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2);
-	case 8:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2);
-	case 7:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2);
-	case 6:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2);
-	case 5:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2);
-	case 4:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2);
-	case 3:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2);
-	case 2:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2);
-	case 1:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2);
-	case 0:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2);
-	}
+		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
 
-	switch (nr_pri_bits) {
-	case 7:
-		cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
-		cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
-	case 6:
-		cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
-	default:
-		cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
-	}
+		write_gicreg(0, ICH_HCR_EL2);
+		val = read_gicreg(ICH_VTR_EL2);
+		max_lr_idx = vtr_to_max_lr_idx(val);
+		nr_pri_bits = vtr_to_nr_pri_bits(val);
 
-	switch (nr_pri_bits) {
-	case 7:
-		cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
-		cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
-	case 6:
-		cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
-	default:
-		cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+		save_maint_int_state(vcpu, max_lr_idx + 1);
+
+		for (i = 0; i <= max_lr_idx; i++) {
+			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+				continue;
+
+			if (cpu_if->vgic_elrsr & (1 << i)) {
+				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
+				continue;
+			}
+
+			cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
+			__gic_v3_set_lr(0, i);
+		}
+
+		switch (nr_pri_bits) {
+		case 7:
+			cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
+			cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
+		case 6:
+			cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
+		default:
+			cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
+		}
+
+		switch (nr_pri_bits) {
+		case 7:
+			cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
+			cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
+		case 6:
+			cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
+		default:
+			cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+		}
+
+		vcpu->arch.vgic_cpu.live_lrs = 0;
+	} else {
+		cpu_if->vgic_misr = 0;
+		cpu_if->vgic_eisr = 0;
+		cpu_if->vgic_elrsr = 0xffff;
+		cpu_if->vgic_ap0r[0] = 0;
+		cpu_if->vgic_ap0r[1] = 0;
+		cpu_if->vgic_ap0r[2] = 0;
+		cpu_if->vgic_ap0r[3] = 0;
+		cpu_if->vgic_ap1r[0] = 0;
+		cpu_if->vgic_ap1r[1] = 0;
+		cpu_if->vgic_ap1r[2] = 0;
+		cpu_if->vgic_ap1r[3] = 0;
 	}
 
 	val = read_gicreg(ICC_SRE_EL2);
@@ -128,6 +245,8 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 val;
 	u32 max_lr_idx, nr_pri_bits;
+	u16 live_lrs = 0;
+	int i;
 
 	/*
 	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
@@ -140,66 +259,46 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
 	isb();
 
-	write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
-	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
-
 	val = read_gicreg(ICH_VTR_EL2);
 	max_lr_idx = vtr_to_max_lr_idx(val);
 	nr_pri_bits = vtr_to_nr_pri_bits(val);
 
-	switch (nr_pri_bits) {
-	case 7:
-		write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
-		write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
-	case 6:
-		write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
-	default:
-		write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+	for (i = 0; i <= max_lr_idx; i++) {
+		if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
+			live_lrs |= (1 << i);
 	}
 
-	switch (nr_pri_bits) {
-	case 7:
-		write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
-		write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
-	case 6:
-		write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
-	default:
-		write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
-	}
+	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
 
-	switch (max_lr_idx) {
-	case 15:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
-	case 14:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2);
-	case 13:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2);
-	case 12:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2);
-	case 11:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2);
-	case 10:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2);
-	case 9:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2);
-	case 8:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2);
-	case 7:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2);
-	case 6:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2);
-	case 5:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2);
-	case 4:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2);
-	case 3:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2);
-	case 2:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2);
-	case 1:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2);
-	case 0:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2);
+	if (live_lrs) {
+		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+
+		switch (nr_pri_bits) {
+		case 7:
+			write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
+			write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
+		case 6:
+			write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
+		default:
+			write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+		}
+
+		switch (nr_pri_bits) {
+		case 7:
+			write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
+			write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+		case 6:
+			write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+		default:
+			write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+		}
+
+		for (i = 0; i <= max_lr_idx; i++) {
+			if (!(live_lrs & (1 << i)))
+				continue;
+
+			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
+		}
 	}
 
 	/*
@@ -209,6 +308,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 */
 	isb();
 	dsb(sy);
+	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 
 	/*
 	 * Prevent the guest from touching the GIC system registers if
@@ -220,6 +320,15 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	}
 }
 
+void __hyp_text __vgic_v3_init_lrs(void)
+{
+	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
+	int i;
+
+	for (i = 0; i <= max_lr_idx; i++)
+		__gic_v3_set_lr(0, i);
+}
+
 static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
 {
 	return read_gicreg(ICH_VTR_EL2);
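The vgic-v3 rework above is a world-switch optimisation: restore records which list registers were actually handed to the guest in a live_lrs bitmap, and the next save only reads back (and zeroes) those LRs, falling back to cheap constant stores when none were in use. The following user-space sketch is a toy model of that bookkeeping, not kernel code; all names and the LR_STATE encoding are illustrative stand-ins.

/* Toy model of the live_lrs optimisation. "Hardware" LRs are a plain
 * array here; in the kernel they are the ICH_LR<n>_EL2 registers and
 * every access is a system-register read or write. */
#include <stdint.h>
#include <stdio.h>

#define NR_LRS   16
#define LR_STATE (3ULL << 62)	/* stand-in for ICH_LR_STATE (pending/active) */

static uint64_t hw_lr[NR_LRS];	/* stands in for ICH_LR0_EL2..ICH_LR15_EL2 */

struct cpu_if {
	uint64_t vgic_lr[NR_LRS];
	uint16_t live_lrs;	/* bit n set => LR n was given to the guest */
};

static void restore_lrs(struct cpu_if *c)
{
	uint16_t live = 0;

	for (int i = 0; i < NR_LRS; i++) {
		if (c->vgic_lr[i] & LR_STATE) {
			hw_lr[i] = c->vgic_lr[i];	/* only write useful LRs */
			live |= 1 << i;
		}
	}
	c->live_lrs = live;
}

static int save_lrs(struct cpu_if *c)
{
	int accesses = 0;

	for (int i = 0; i < NR_LRS; i++) {
		if (!(c->live_lrs & (1 << i)))
			continue;		/* never handed out: skip */
		c->vgic_lr[i] = hw_lr[i];	/* the expensive part */
		hw_lr[i] = 0;
		accesses++;
	}
	c->live_lrs = 0;
	return accesses;
}

int main(void)
{
	struct cpu_if c = { .vgic_lr = { [3] = LR_STATE | 27 } };

	restore_lrs(&c);
	printf("LR reads on exit: %d (instead of %d)\n", save_lrs(&c), NR_LRS);
	return 0;
}

With a single interrupt in flight, the exit path touches one LR rather than all sixteen, which is the common case the patch series is optimising for.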
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index f34745cb3d23..9677bf069bcc 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -77,7 +77,11 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_GUEST_DEBUG_HW_WPS:
 		r = get_num_wrps();
 		break;
+	case KVM_CAP_ARM_PMU_V3:
+		r = kvm_arm_support_pmu_v3();
+		break;
 	case KVM_CAP_SET_GUEST_DEBUG:
+	case KVM_CAP_VCPU_ATTRIBUTES:
 		r = 1;
 		break;
 	default:
@@ -120,6 +124,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset system registers */
 	kvm_reset_sys_regs(vcpu);
 
+	/* Reset PMU */
+	kvm_pmu_vcpu_reset(vcpu);
+
 	/* Reset timer */
 	return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
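With the new KVM_CAP_ARM_PMU_V3 case above, user space can probe for vPMU support before trying to configure one. A minimal probe might look as follows; this is a sketch assuming kernel headers that already define KVM_CAP_ARM_PMU_V3, while KVM_CHECK_EXTENSION itself is the long-standing capability ioctl from <linux/kvm.h>.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* Returns 1 when the host PMU can back a guest PMU, 0 otherwise. */
	printf("KVM_CAP_ARM_PMU_V3: %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PMU_V3));
	return 0;
}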
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2e90371cfb37..61ba59104845 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -20,6 +20,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bsearch.h>
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
@@ -34,6 +35,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
+#include <asm/perf_event.h>
 
 #include <trace/events/kvm.h>
 
@@ -439,6 +441,344 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
 }
 
+static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+	u64 pmcr, val;
+
+	asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
+	/* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
+	 * except PMCR.E resetting to zero.
+	 */
+	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
+	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+}
+
+static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
+		 || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
+		 || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
+		 || vcpu_mode_priv(vcpu));
+}
+
+static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			const struct sys_reg_desc *r)
+{
+	u64 val;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	if (p->is_write) {
+		/* Only update writable bits of PMCR */
+		val = vcpu_sys_reg(vcpu, PMCR_EL0);
+		val &= ~ARMV8_PMU_PMCR_MASK;
+		val |= p->regval & ARMV8_PMU_PMCR_MASK;
+		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+		kvm_pmu_handle_pmcr(vcpu, val);
+	} else {
+		/* PMCR.P & PMCR.C are RAZ */
+		val = vcpu_sys_reg(vcpu, PMCR_EL0)
+		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
+		p->regval = val;
+	}
+
+	return true;
+}
+
+static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
+{
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_event_counter_el0_disabled(vcpu))
+		return false;
+
+	if (p->is_write)
+		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
+	else
+		/* return PMSELR.SEL field */
+		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
+			    & ARMV8_PMU_COUNTER_MASK;
+
+	return true;
+}
+
+static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
+{
+	u64 pmceid;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	BUG_ON(p->is_write);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	if (!(p->Op2 & 1))
+		asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
+	else
+		asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid));
+
+	p->regval = pmceid;
+
+	return true;
+}
+
+static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
+{
+	u64 pmcr, val;
+
+	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
+	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
+		return false;
+
+	return true;
+}
+
+static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+			      struct sys_reg_params *p,
+			      const struct sys_reg_desc *r)
+{
+	u64 idx;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (r->CRn == 9 && r->CRm == 13) {
+		if (r->Op2 == 2) {
+			/* PMXEVCNTR_EL0 */
+			if (pmu_access_event_counter_el0_disabled(vcpu))
+				return false;
+
+			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
+			      & ARMV8_PMU_COUNTER_MASK;
+		} else if (r->Op2 == 0) {
+			/* PMCCNTR_EL0 */
+			if (pmu_access_cycle_counter_el0_disabled(vcpu))
+				return false;
+
+			idx = ARMV8_PMU_CYCLE_IDX;
+		} else {
+			BUG();
+		}
+	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
+		/* PMEVCNTRn_EL0 */
+		if (pmu_access_event_counter_el0_disabled(vcpu))
+			return false;
+
+		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+	} else {
+		BUG();
+	}
+
+	if (!pmu_counter_idx_valid(vcpu, idx))
+		return false;
+
+	if (p->is_write) {
+		if (pmu_access_el0_disabled(vcpu))
+			return false;
+
+		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
+	} else {
+		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
+	}
+
+	return true;
+}
+
+static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			       const struct sys_reg_desc *r)
+{
+	u64 idx, reg;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
+		/* PMXEVTYPER_EL0 */
+		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
+		reg = PMEVTYPER0_EL0 + idx;
+	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
+		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+		if (idx == ARMV8_PMU_CYCLE_IDX)
+			reg = PMCCFILTR_EL0;
+		else
+			/* PMEVTYPERn_EL0 */
+			reg = PMEVTYPER0_EL0 + idx;
+	} else {
+		BUG();
+	}
+
+	if (!pmu_counter_idx_valid(vcpu, idx))
+		return false;
+
+	if (p->is_write) {
+		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
+		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+	}
+
+	return true;
+}
+
+static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u64 val, mask;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	mask = kvm_pmu_valid_counter_mask(vcpu);
+	if (p->is_write) {
+		val = p->regval & mask;
+		if (r->Op2 & 0x1) {
+			/* accessing PMCNTENSET_EL0 */
+			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+			kvm_pmu_enable_counter(vcpu, val);
+		} else {
+			/* accessing PMCNTENCLR_EL0 */
+			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+			kvm_pmu_disable_counter(vcpu, val);
+		}
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+	}
+
+	return true;
+}
+
+static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (!vcpu_mode_priv(vcpu))
+		return false;
+
+	if (p->is_write) {
+		u64 val = p->regval & mask;
+
+		if (r->Op2 & 0x1)
+			/* accessing PMINTENSET_EL1 */
+			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+		else
+			/* accessing PMINTENCLR_EL1 */
+			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
+	}
+
+	return true;
+}
+
+static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			 const struct sys_reg_desc *r)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	if (p->is_write) {
+		if (r->CRm & 0x2)
+			/* accessing PMOVSSET_EL0 */
+			kvm_pmu_overflow_set(vcpu, p->regval & mask);
+		else
+			/* accessing PMOVSCLR_EL0 */
+			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
+	}
+
+	return true;
+}
+
+static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u64 mask;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_write_swinc_el0_disabled(vcpu))
+		return false;
+
+	if (p->is_write) {
+		mask = kvm_pmu_valid_counter_mask(vcpu);
+		kvm_pmu_software_increment(vcpu, p->regval & mask);
+		return true;
+	}
+
+	return false;
+}
+
+static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			     const struct sys_reg_desc *r)
+{
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (p->is_write) {
+		if (!vcpu_mode_priv(vcpu))
+			return false;
+
+		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
+						    & ARMV8_PMU_USERENR_MASK;
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
+			    & ARMV8_PMU_USERENR_MASK;
+	}
+
+	return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	/* DBGBVRn_EL1 */						\
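The pmu_*_el0_disabled() helpers above all implement one rule: a trapped access from the guest's EL0 is refused unless the guest is privileged or has set the matching PMUSERENR_EL0 enable bit. A stand-alone restatement of the base predicate follows; EN is bit 0 of PMUSERENR_EL0, everything else in the sketch is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USERENR_EN (1u << 0)	/* PMUSERENR_EL0.EN: EL0 access enable */

static bool access_el0_disabled(uint64_t pmuserenr, bool guest_privileged)
{
	/* Access is permitted from EL1, or from EL0 once EN is set. */
	return !((pmuserenr & USERENR_EN) || guest_privileged);
}

int main(void)
{
	printf("EL0, EN=0 -> disabled=%d\n", access_el0_disabled(0, false));
	printf("EL0, EN=1 -> disabled=%d\n",
	       access_el0_disabled(USERENR_EN, false));
	printf("EL1, EN=0 -> disabled=%d\n", access_el0_disabled(0, true));
	return 0;
}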
@@ -454,6 +794,20 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
 	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
 
+/* Macro to expand the PMEVCNTRn_EL0 register */
+#define PMU_PMEVCNTR_EL0(n)						\
+	/* PMEVCNTRn_EL0 */						\
+	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
+	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+
+/* Macro to expand the PMEVTYPERn_EL0 register */
+#define PMU_PMEVTYPER_EL0(n)						\
+	/* PMEVTYPERn_EL0 */						\
+	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
+	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
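Both macros split the counter index n across the system-register encoding: n[2:0] becomes Op2 and n[4:3] lands in the low bits of CRm, the inverse of the idx = ((r->CRm & 3) << 3) | (r->Op2 & 7) computation in access_pmu_evcntr() and access_pmu_evtyper() above (PMEVTYPERn_EL0 uses CRm base 0b1100 instead of 0b1000). A quick user-space check of the mapping, using the same bit arithmetic as the macros:

#include <stdio.h>

int main(void)
{
	for (int n = 0; n <= 30; n++) {
		int crm = 0x8 | ((n >> 3) & 0x3);	/* 0b1000 | n[4:3] */
		int op2 = n & 0x7;			/* n[2:0] */

		printf("PMEVCNTR%-2d_EL0 -> CRn=14 CRm=%2d Op2=%d\n",
		       n, crm, op2);
	}
	return 0;
}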
@@ -583,10 +937,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
 	/* PMINTENSET_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
-	  trap_raz_wi },
+	  access_pminten, reset_unknown, PMINTENSET_EL1 },
 	/* PMINTENCLR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
-	  trap_raz_wi },
+	  access_pminten, NULL, PMINTENSET_EL1 },
 
 	/* MAIR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
@@ -623,43 +977,46 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
 	/* PMCR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
-	  trap_raz_wi },
+	  access_pmcr, reset_pmcr, },
 	/* PMCNTENSET_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
-	  trap_raz_wi },
+	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 	/* PMCNTENCLR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
-	  trap_raz_wi },
+	  access_pmcnten, NULL, PMCNTENSET_EL0 },
 	/* PMOVSCLR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
-	  trap_raz_wi },
+	  access_pmovs, NULL, PMOVSSET_EL0 },
 	/* PMSWINC_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
-	  trap_raz_wi },
+	  access_pmswinc, reset_unknown, PMSWINC_EL0 },
 	/* PMSELR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
-	  trap_raz_wi },
+	  access_pmselr, reset_unknown, PMSELR_EL0 },
 	/* PMCEID0_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
-	  trap_raz_wi },
+	  access_pmceid },
 	/* PMCEID1_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
-	  trap_raz_wi },
+	  access_pmceid },
 	/* PMCCNTR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
-	  trap_raz_wi },
+	  access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
 	/* PMXEVTYPER_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
-	  trap_raz_wi },
+	  access_pmu_evtyper },
 	/* PMXEVCNTR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
-	  trap_raz_wi },
-	/* PMUSERENR_EL0 */
+	  access_pmu_evcntr },
+	/* PMUSERENR_EL0
+	 * This register resets as unknown in 64bit mode while it resets as zero
+	 * in 32bit mode. Here we choose to reset it as zero for consistency.
+	 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
-	  trap_raz_wi },
+	  access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
 	/* PMOVSSET_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
-	  trap_raz_wi },
+	  access_pmovs, reset_unknown, PMOVSSET_EL0 },
 
 	/* TPIDR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -668,6 +1025,77 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
 	  NULL, reset_unknown, TPIDRRO_EL0 },
 
+	/* PMEVCNTRn_EL0 */
+	PMU_PMEVCNTR_EL0(0),
+	PMU_PMEVCNTR_EL0(1),
+	PMU_PMEVCNTR_EL0(2),
+	PMU_PMEVCNTR_EL0(3),
+	PMU_PMEVCNTR_EL0(4),
+	PMU_PMEVCNTR_EL0(5),
+	PMU_PMEVCNTR_EL0(6),
+	PMU_PMEVCNTR_EL0(7),
+	PMU_PMEVCNTR_EL0(8),
+	PMU_PMEVCNTR_EL0(9),
+	PMU_PMEVCNTR_EL0(10),
+	PMU_PMEVCNTR_EL0(11),
+	PMU_PMEVCNTR_EL0(12),
+	PMU_PMEVCNTR_EL0(13),
+	PMU_PMEVCNTR_EL0(14),
+	PMU_PMEVCNTR_EL0(15),
+	PMU_PMEVCNTR_EL0(16),
+	PMU_PMEVCNTR_EL0(17),
+	PMU_PMEVCNTR_EL0(18),
+	PMU_PMEVCNTR_EL0(19),
+	PMU_PMEVCNTR_EL0(20),
+	PMU_PMEVCNTR_EL0(21),
+	PMU_PMEVCNTR_EL0(22),
+	PMU_PMEVCNTR_EL0(23),
+	PMU_PMEVCNTR_EL0(24),
+	PMU_PMEVCNTR_EL0(25),
+	PMU_PMEVCNTR_EL0(26),
+	PMU_PMEVCNTR_EL0(27),
+	PMU_PMEVCNTR_EL0(28),
+	PMU_PMEVCNTR_EL0(29),
+	PMU_PMEVCNTR_EL0(30),
+	/* PMEVTYPERn_EL0 */
+	PMU_PMEVTYPER_EL0(0),
+	PMU_PMEVTYPER_EL0(1),
+	PMU_PMEVTYPER_EL0(2),
+	PMU_PMEVTYPER_EL0(3),
+	PMU_PMEVTYPER_EL0(4),
+	PMU_PMEVTYPER_EL0(5),
+	PMU_PMEVTYPER_EL0(6),
+	PMU_PMEVTYPER_EL0(7),
+	PMU_PMEVTYPER_EL0(8),
+	PMU_PMEVTYPER_EL0(9),
+	PMU_PMEVTYPER_EL0(10),
+	PMU_PMEVTYPER_EL0(11),
+	PMU_PMEVTYPER_EL0(12),
+	PMU_PMEVTYPER_EL0(13),
+	PMU_PMEVTYPER_EL0(14),
+	PMU_PMEVTYPER_EL0(15),
+	PMU_PMEVTYPER_EL0(16),
+	PMU_PMEVTYPER_EL0(17),
+	PMU_PMEVTYPER_EL0(18),
+	PMU_PMEVTYPER_EL0(19),
+	PMU_PMEVTYPER_EL0(20),
+	PMU_PMEVTYPER_EL0(21),
+	PMU_PMEVTYPER_EL0(22),
+	PMU_PMEVTYPER_EL0(23),
+	PMU_PMEVTYPER_EL0(24),
+	PMU_PMEVTYPER_EL0(25),
+	PMU_PMEVTYPER_EL0(26),
+	PMU_PMEVTYPER_EL0(27),
+	PMU_PMEVTYPER_EL0(28),
+	PMU_PMEVTYPER_EL0(29),
+	PMU_PMEVTYPER_EL0(30),
+	/* PMCCFILTR_EL0
+	 * This register resets as unknown in 64bit mode while it resets as zero
+	 * in 32bit mode. Here we choose to reset it as zero for consistency.
+	 */
+	{ Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
+	  access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+
 	/* DACR32_EL2 */
 	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
 	  NULL, reset_unknown, DACR32_EL2 },
@@ -857,6 +1285,20 @@ static const struct sys_reg_desc cp14_64_regs[] = {
 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
 };
 
+/* Macro to expand the PMEVCNTRn register */
+#define PMU_PMEVCNTR(n)							\
+	/* PMEVCNTRn */							\
+	{ Op1(0), CRn(0b1110),						\
+	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evcntr }
+
+/* Macro to expand the PMEVTYPERn register */
+#define PMU_PMEVTYPER(n)						\
+	/* PMEVTYPERn */						\
+	{ Op1(0), CRn(0b1110),						\
+	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evtyper }
+
 /*
  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
  * depending on the way they are accessed (as a 32bit or a 64bit
@@ -885,19 +1327,21 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
 
 	/* PMU */
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
 
 	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
@@ -908,10 +1352,78 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
 
 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+
+	/* PMEVCNTRn */
+	PMU_PMEVCNTR(0),
+	PMU_PMEVCNTR(1),
+	PMU_PMEVCNTR(2),
+	PMU_PMEVCNTR(3),
+	PMU_PMEVCNTR(4),
+	PMU_PMEVCNTR(5),
+	PMU_PMEVCNTR(6),
+	PMU_PMEVCNTR(7),
+	PMU_PMEVCNTR(8),
+	PMU_PMEVCNTR(9),
+	PMU_PMEVCNTR(10),
+	PMU_PMEVCNTR(11),
+	PMU_PMEVCNTR(12),
+	PMU_PMEVCNTR(13),
+	PMU_PMEVCNTR(14),
+	PMU_PMEVCNTR(15),
+	PMU_PMEVCNTR(16),
+	PMU_PMEVCNTR(17),
+	PMU_PMEVCNTR(18),
+	PMU_PMEVCNTR(19),
+	PMU_PMEVCNTR(20),
+	PMU_PMEVCNTR(21),
+	PMU_PMEVCNTR(22),
+	PMU_PMEVCNTR(23),
+	PMU_PMEVCNTR(24),
+	PMU_PMEVCNTR(25),
+	PMU_PMEVCNTR(26),
+	PMU_PMEVCNTR(27),
+	PMU_PMEVCNTR(28),
+	PMU_PMEVCNTR(29),
+	PMU_PMEVCNTR(30),
+	/* PMEVTYPERn */
+	PMU_PMEVTYPER(0),
+	PMU_PMEVTYPER(1),
+	PMU_PMEVTYPER(2),
+	PMU_PMEVTYPER(3),
+	PMU_PMEVTYPER(4),
+	PMU_PMEVTYPER(5),
+	PMU_PMEVTYPER(6),
+	PMU_PMEVTYPER(7),
+	PMU_PMEVTYPER(8),
+	PMU_PMEVTYPER(9),
+	PMU_PMEVTYPER(10),
+	PMU_PMEVTYPER(11),
+	PMU_PMEVTYPER(12),
+	PMU_PMEVTYPER(13),
+	PMU_PMEVTYPER(14),
+	PMU_PMEVTYPER(15),
+	PMU_PMEVTYPER(16),
+	PMU_PMEVTYPER(17),
+	PMU_PMEVTYPER(18),
+	PMU_PMEVTYPER(19),
+	PMU_PMEVTYPER(20),
+	PMU_PMEVTYPER(21),
+	PMU_PMEVTYPER(22),
+	PMU_PMEVTYPER(23),
+	PMU_PMEVTYPER(24),
+	PMU_PMEVTYPER(25),
+	PMU_PMEVTYPER(26),
+	PMU_PMEVTYPER(27),
+	PMU_PMEVTYPER(28),
+	PMU_PMEVTYPER(29),
+	PMU_PMEVTYPER(30),
+	/* PMCCFILTR */
+	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
 };
 
 static const struct sys_reg_desc cp15_64_regs[] = {
 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
 };
@@ -942,29 +1454,32 @@ static const struct sys_reg_desc *get_target_table(unsigned target,
 	}
 }
 
+#define reg_to_match_value(x)						\
+	({								\
+		unsigned long val;					\
+		val = (x)->Op0 << 14;					\
+		val |= (x)->Op1 << 11;					\
+		val |= (x)->CRn << 7;					\
+		val |= (x)->CRm << 3;					\
+		val |= (x)->Op2;					\
+		val;							\
+	})
+
+static int match_sys_reg(const void *key, const void *elt)
+{
+	const unsigned long pval = (unsigned long)key;
+	const struct sys_reg_desc *r = elt;
+
+	return pval - reg_to_match_value(r);
+}
+
 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
 					   const struct sys_reg_desc table[],
 					   unsigned int num)
 {
-	unsigned int i;
-
-	for (i = 0; i < num; i++) {
-		const struct sys_reg_desc *r = &table[i];
-
-		if (params->Op0 != r->Op0)
-			continue;
-		if (params->Op1 != r->Op1)
-			continue;
-		if (params->CRn != r->CRn)
-			continue;
-		if (params->CRm != r->CRm)
-			continue;
-		if (params->Op2 != r->Op2)
-			continue;
+	unsigned long pval = reg_to_match_value(params);
 
-		return r;
-	}
-	return NULL;
+	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
 }
 
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
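find_reg() now packs (Op0, Op1, CRn, CRm, Op2) into one integer key and binary-searches the descriptor table, which is why the table header comment insists the entries stay sorted ascending on exactly those fields. Below is a self-contained user-space sketch of the same lookup; the struct and the three-entry table are made up for illustration, and libc's bsearch(3) stands in for the kernel's <linux/bsearch.h> helper.

#include <stdio.h>
#include <stdlib.h>

struct desc {
	unsigned Op0, Op1, CRn, CRm, Op2;
	const char *name;
};

/* Same field packing as reg_to_match_value() in the patch. */
static unsigned long to_key(const struct desc *d)
{
	return ((unsigned long)d->Op0 << 14) | (d->Op1 << 11) |
	       (d->CRn << 7) | (d->CRm << 3) | d->Op2;
}

static int cmp(const void *key, const void *elt)
{
	unsigned long pval = (unsigned long)key;
	unsigned long eval = to_key(elt);

	return (pval > eval) - (pval < eval);	/* overflow-safe compare */
}

int main(void)
{
	/* Must stay sorted ascending by Op0, Op1, CRn, CRm, Op2. */
	static const struct desc table[] = {
		{ 3, 0, 9, 14, 1, "PMINTENSET_EL1" },
		{ 3, 3, 9, 12, 0, "PMCR_EL0" },
		{ 3, 3, 9, 13, 0, "PMCCNTR_EL0" },
	};
	const struct desc want = { 3, 3, 9, 13, 0, NULL };
	const struct desc *hit;

	/* The key is smuggled through the pointer argument, as in find_reg(). */
	hit = bsearch((void *)to_key(&want), table,
		      sizeof(table) / sizeof(table[0]), sizeof(table[0]), cmp);
	printf("%s\n", hit ? hit->name : "not found");
	return 0;
}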