| | | |
|---|---|---|
| author | Marc Zyngier <marc.zyngier@arm.com> | 2018-02-06 12:56:15 -0500 |
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2018-02-06 17:54:07 -0500 |
| commit | f72af90c3783d924337624659b43e2d36f1b36b4 (patch) | |
| tree | d0d7039ad9b14dd45e38275b2d6486b2d5cdf6b0 /arch/arm64 | |
| parent | 6167ec5c9145cdf493722dfd80a5d48bafc4a18a (diff) | |
arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible.
So let's intercept it as early as we can by testing for the
function call number as soon as we've identified an HVC call
coming from the guest.
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
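
[Editorial note: for context on the path being optimised, below is a minimal sketch of the guest-side caller, assuming the SMCCC v1.1 helper arm_smccc_1_1_hvc() from <linux/arm-smccc.h> introduced elsewhere in this series; the function name itself is made up for illustration and is not the in-tree caller.]

```c
#include <linux/arm-smccc.h>

/*
 * Illustrative guest-side caller: request the branch-predictor
 * workaround by issuing ARM_SMCCC_ARCH_WORKAROUND_1 as an HVC.
 * With this patch, the hypervisor answers it from el1_hvc_guest
 * and erets straight back to the guest.
 */
static void hvc_arch_workaround_1(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	/* res.a0 is 0 (SMCCC success) when the fast path handled the call */
}
```

The point of the patch is that this particular call is now recognised and answered before any of the general guest-exit handling runs.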
Diffstat (limited to 'arch/arm64')
| -rw-r--r-- | arch/arm64/kvm/hyp/hyp-entry.S | 20 |
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index e4f37b9dd47c..f36464bd57c5 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
@@ -64,10 +65,11 @@ alternative_endif
 	lsr	x0, x1, #ESR_ELx_EC_SHIFT
 
 	cmp	x0, #ESR_ELx_EC_HVC64
+	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap
 
-	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
-	cbnz	x1, el1_trap		// called HVC
+	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
+	cbnz	x1, el1_hvc_guest	// called HVC
 
 	/* Here, we're pretty sure the host called HVC. */
 	ldp	x0, x1, [sp], #16
@@ -100,6 +102,20 @@ alternative_endif
 
 	eret
 
+el1_hvc_guest:
+	/*
+	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+	 * The workaround has already been applied on the host,
+	 * so let's quickly get back to the guest. We don't bother
+	 * restoring x1, as it can be clobbered anyway.
+	 */
+	ldr	x1, [sp]				// Guest's x0
+	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+	cbnz	w1, el1_trap
+	mov	x0, x1
+	add	sp, sp, #16
+	eret
+
 el1_trap:
 	/*
 	 * x0: ESR_EC
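
[Editorial note: two details of the fast path are worth spelling out. The added ccmp lets a single b.ne cover both the HVC64 and HVC32 exception classes, and the eor does double duty: it is the comparison against ARM_SMCCC_ARCH_WORKAROUND_1, and on a match it leaves w1 at zero, which is exactly the SMCCC success value handed back to the guest in x0. Below is a self-contained C sketch of that function-ID check; the 0x80008000 constant is the architected SMCCC ID for the workaround, while the helper name and the test harness are purely illustrative.]

```c
#include <stdint.h>
#include <stdio.h>

/* Architected SMCCC function ID for ARCH_WORKAROUND_1 (0x8000_8000). */
#define ARM_SMCCC_ARCH_WORKAROUND_1	0x80008000U

/*
 * Mirror of the el1_hvc_guest fast path: XOR the guest's x0 (the SMCCC
 * function ID) against the workaround ID. A non-zero result means "some
 * other HVC" and falls through to the normal trap path; zero means the
 * workaround was requested, and that same zero goes back to the guest
 * in x0 as the SMCCC success code.
 */
static int handle_guest_hvc(uint32_t guest_x0, uint32_t *ret_x0)
{
	uint32_t w1 = guest_x0 ^ ARM_SMCCC_ARCH_WORKAROUND_1;

	if (w1 != 0)
		return 0;	/* cbnz w1, el1_trap: take the ordinary exit path */

	*ret_x0 = w1;		/* mov x0, x1: w1 == 0 == success */
	return 1;		/* add sp, sp, #16; eret: straight back to the guest */
}

int main(void)
{
	uint32_t x0 = 0;
	int handled;

	handled = handle_guest_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, &x0);
	printf("workaround HVC: fast-pathed=%d, x0=%u\n", handled, x0);

	handled = handle_guest_hvc(0x84000000U, &x0);	/* e.g. PSCI_VERSION */
	printf("other HVC:      fast-pathed=%d\n", handled);

	return 0;
}
```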
